diff --git a/.editorconfig b/.editorconfig --- a/.editorconfig +++ b/.editorconfig @@ -11,3 +11,8 @@ trim_trailing_whitespace = true indent_size = 8 indent_style = tab trim_trailing_whitespace = true + +[*.t] +indent_size = 2 +indent_style = space +trim_trailing_whitespace = false diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -31,8 +31,6 @@ build contrib/chg/chg contrib/hgsh/hgsh contrib/vagrant/.vagrant -contrib/docker/debian-* -contrib/docker/ubuntu-* dist packages doc/common.txt diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -164,6 +164,39 @@ i18n/hg.pot: $(PYFILES) $(DOCFILES) i18n # Packaging targets +packaging_targets := \ + centos5 \ + centos6 \ + centos7 \ + deb \ + docker-centos5 \ + docker-centos6 \ + docker-centos7 \ + docker-debian-jessie \ + docker-debian-stretch \ + docker-fedora20 \ + docker-fedora21 \ + docker-fedora28 \ + docker-ubuntu-trusty \ + docker-ubuntu-trusty-ppa \ + docker-ubuntu-xenial \ + docker-ubuntu-xenial-ppa \ + docker-ubuntu-artful \ + docker-ubuntu-artful-ppa \ + docker-ubuntu-bionic \ + docker-ubuntu-bionic-ppa \ + fedora20 \ + fedora21 \ + fedora28 \ + linux-wheels \ + linux-wheels-x86_64 \ + linux-wheels-i686 \ + ppa + +# Forward packaging targets for convenience. +$(packaging_targets): + $(MAKE) -C contrib/packaging $@ + osx: rm -rf build/mercurial /usr/bin/python2.7 setup.py install --optimize=1 \ @@ -197,127 +230,14 @@ osx: --identifier org.mercurial-scm.mercurial \ --version "$${HGVER}" \ build/mercurial.pkg && \ - productbuild --distribution contrib/macosx/distribution.xml \ + productbuild --distribution contrib/packaging/macosx/distribution.xml \ --package-path build/ \ --version "$${HGVER}" \ - --resources contrib/macosx/ \ + --resources contrib/packaging/macosx/ \ "$${OUTPUTDIR:-dist/}"/Mercurial-"$${HGVER}"-macosx"$${OSXVER}".pkg -deb: - contrib/builddeb - -ppa: - contrib/builddeb --source-only - -contrib/docker/debian-%: contrib/docker/debian.template - sed "s/__CODENAME__/$*/" $< > $@ - -docker-debian-jessie: contrib/docker/debian-jessie - contrib/dockerdeb debian jessie - -docker-debian-stretch: contrib/docker/debian-stretch - contrib/dockerdeb debian stretch - -contrib/docker/ubuntu-%: contrib/docker/ubuntu.template - sed "s/__CODENAME__/$*/" $< > $@ - -docker-ubuntu-trusty: contrib/docker/ubuntu-trusty - contrib/dockerdeb ubuntu trusty - -docker-ubuntu-trusty-ppa: contrib/docker/ubuntu-trusty - contrib/dockerdeb ubuntu trusty --source-only - -docker-ubuntu-xenial: contrib/docker/ubuntu-xenial - contrib/dockerdeb ubuntu xenial - -docker-ubuntu-xenial-ppa: contrib/docker/ubuntu-xenial - contrib/dockerdeb ubuntu xenial --source-only - -docker-ubuntu-artful: contrib/docker/ubuntu-artful - contrib/dockerdeb ubuntu artful - -docker-ubuntu-artful-ppa: contrib/docker/ubuntu-artful - contrib/dockerdeb ubuntu artful --source-only - -docker-ubuntu-bionic: contrib/docker/ubuntu-bionic - contrib/dockerdeb ubuntu bionic - -docker-ubuntu-bionic-ppa: contrib/docker/ubuntu-bionic - contrib/dockerdeb ubuntu bionic --source-only - -fedora20: - mkdir -p packages/fedora20 - contrib/buildrpm - cp rpmbuild/RPMS/*/* packages/fedora20 - cp rpmbuild/SRPMS/* packages/fedora20 - rm -rf rpmbuild - -docker-fedora20: - mkdir -p packages/fedora20 - contrib/dockerrpm fedora20 - -fedora21: - mkdir -p packages/fedora21 - contrib/buildrpm - cp rpmbuild/RPMS/*/* packages/fedora21 - cp rpmbuild/SRPMS/* packages/fedora21 - rm -rf rpmbuild - -docker-fedora21: - mkdir -p packages/fedora21 - contrib/dockerrpm fedora21 - -centos5: - mkdir 
-p packages/centos5 - contrib/buildrpm --withpython - cp rpmbuild/RPMS/*/* packages/centos5 - cp rpmbuild/SRPMS/* packages/centos5 - -docker-centos5: - mkdir -p packages/centos5 - contrib/dockerrpm centos5 --withpython - -centos6: - mkdir -p packages/centos6 - contrib/buildrpm --withpython - cp rpmbuild/RPMS/*/* packages/centos6 - cp rpmbuild/SRPMS/* packages/centos6 - -docker-centos6: - mkdir -p packages/centos6 - contrib/dockerrpm centos6 --withpython - -centos7: - mkdir -p packages/centos7 - contrib/buildrpm - cp rpmbuild/RPMS/*/* packages/centos7 - cp rpmbuild/SRPMS/* packages/centos7 - -docker-centos7: - mkdir -p packages/centos7 - contrib/dockerrpm centos7 - -linux-wheels: linux-wheels-x86_64 linux-wheels-i686 - -linux-wheels-x86_64: - docker run -e "HGTEST_JOBS=$(shell nproc)" --rm -ti -v `pwd`:/src quay.io/pypa/manylinux1_x86_64 /src/contrib/build-linux-wheels.sh - -linux-wheels-i686: - docker run -e "HGTEST_JOBS=$(shell nproc)" --rm -ti -v `pwd`:/src quay.io/pypa/manylinux1_i686 linux32 /src/contrib/build-linux-wheels.sh - .PHONY: help all local build doc cleanbutpackages clean install install-bin \ install-doc install-home install-home-bin install-home-doc \ dist dist-notests check tests check-code format-c update-pot \ - osx deb ppa \ - docker-debian-jessie \ - docker-debian-stretch \ - docker-ubuntu-trusty docker-ubuntu-trusty-ppa \ - docker-ubuntu-xenial docker-ubuntu-xenial-ppa \ - docker-ubuntu-artful docker-ubuntu-artful-ppa \ - docker-ubuntu-bionic docker-ubuntu-bionic-ppa \ - fedora20 docker-fedora20 \ - fedora21 docker-fedora21 \ - centos5 docker-centos5 \ - centos6 docker-centos6 \ - centos7 docker-centos7 \ - linux-wheels + $(packaging_targets) \ + osx diff --git a/contrib/all-revsets.txt b/contrib/all-revsets.txt --- a/contrib/all-revsets.txt +++ b/contrib/all-revsets.txt @@ -135,3 +135,7 @@ head() and author("mpm") # testing the mutable phases set draft() secret() + +# test finding common ancestors +heads(commonancestors(last(head(), 2))) +heads(commonancestors(head())) diff --git a/contrib/base-revsets.txt b/contrib/base-revsets.txt --- a/contrib/base-revsets.txt +++ b/contrib/base-revsets.txt @@ -46,3 +46,4 @@ parents(20000) (20000::) - (20000) # The one below is used by rebase (children(ancestor(tip~5, tip)) and ::(tip~5)):: +heads(commonancestors(last(head(), 2))) diff --git a/mercurial/__init__.py b/contrib/byteify-strings.py old mode 100644 new mode 100755 copy from mercurial/__init__.py copy to contrib/byteify-strings.py --- a/mercurial/__init__.py +++ b/contrib/byteify-strings.py @@ -1,4 +1,6 @@ -# __init__.py - Startup and module loading logic for Mercurial. +#!/usr/bin/env python3 +# +# byteify-strings.py - transform string literals to be Python 3 safe # # Copyright 2015 Gregory Szorc # @@ -7,293 +9,217 @@ from __future__ import absolute_import +import argparse +import contextlib +import errno +import os import sys - -# Allow 'from mercurial import demandimport' to keep working. -import hgdemandimport -demandimport = hgdemandimport - -__all__ = [] - -# Python 3 uses a custom module loader that transforms source code between -# source file reading and compilation. This is done by registering a custom -# finder that changes the spec for Mercurial modules to use a custom loader. 
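(For readers who have not met this machinery: a sys.meta_path finder is consulted for every import and may hand back a spec whose loader rewrites the source before compilation, which is the pattern the lines below implement. A minimal, self-contained sketch of that pattern follows — the names are hypothetical, not Mercurial's actual classes:)

    import importlib.abc
    import importlib.machinery
    import sys

    class sketchloader(importlib.machinery.SourceFileLoader):
        def source_to_code(self, data, path):
            # transform the raw source bytes here before compiling
            return super().source_to_code(data, path)

    class sketchfinder(importlib.abc.MetaPathFinder):
        def find_spec(self, fullname, path, target=None):
            if not fullname.startswith('mypkg.'):
                return None  # decline; the default finders take over
            # let the remaining finders locate the module, then swap
            # in the rewriting loader
            for finder in sys.meta_path:
                if finder is self:
                    continue
                spec = finder.find_spec(fullname, path, target=target)
                if spec is not None and spec.origin is not None:
                    spec.loader = sketchloader(spec.name, spec.origin)
                    return spec
            return None

    sys.meta_path.insert(0, sketchfinder())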
-if sys.version_info[0] >= 3: - import importlib - import importlib.abc - import io - import token - import tokenize +import tempfile +import token +import tokenize - class hgpathentryfinder(importlib.abc.MetaPathFinder): - """A sys.meta_path finder that uses a custom module loader.""" - def find_spec(self, fullname, path, target=None): - # Only handle Mercurial-related modules. - if not fullname.startswith(('mercurial.', 'hgext.', 'hgext3rd.')): - return None - # don't try to parse binary - if fullname.startswith('mercurial.cext.'): - return None - # third-party packages are expected to be dual-version clean - if fullname.startswith('mercurial.thirdparty'): - return None - # zstd is already dual-version clean, don't try and mangle it - if fullname.startswith('mercurial.zstd'): - return None - # pywatchman is already dual-version clean, don't try and mangle it - if fullname.startswith('hgext.fsmonitor.pywatchman'): - return None +def adjusttokenpos(t, ofs): + """Adjust start/end column of the given token""" + return t._replace(start=(t.start[0], t.start[1] + ofs), + end=(t.end[0], t.end[1] + ofs)) + +def replacetokens(tokens, opts): + """Transform a stream of tokens from raw to Python 3. + + Returns a generator of possibly rewritten tokens. - # Try to find the module using other registered finders. - spec = None - for finder in sys.meta_path: - if finder == self: - continue - - spec = finder.find_spec(fullname, path, target=target) - if spec: - break - - # This is a Mercurial-related module but we couldn't find it - # using the previously-registered finders. This likely means - # the module doesn't exist. - if not spec: - return None + The input token list may be mutated as part of processing. However, + its changes do not necessarily match the output token stream. + """ + sysstrtokens = set() - # TODO need to support loaders from alternate specs, like zip - # loaders. - loader = hgloader(spec.name, spec.origin) - # Can't use util.safehasattr here because that would require - # importing util, and we're in import code. - if hasattr(spec.loader, 'loader'): # hasattr-py3-only - # This is a nested loader (maybe a lazy loader?) - spec.loader.loader = loader - else: - spec.loader = loader - return spec + # The following utility functions access the tokens list and i index of + # the for i, t enumerate(tokens) loop below + def _isop(j, *o): + """Assert that tokens[j] is an OP with one of the given values""" + try: + return tokens[j].type == token.OP and tokens[j].string in o + except IndexError: + return False - def replacetokens(tokens, fullname): - """Transform a stream of tokens from raw to Python 3. - - It is called by the custom module loading machinery to rewrite - source/tokens between source decoding and compilation. + def _findargnofcall(n): + """Find arg n of a call expression (start at 0) - Returns a generator of possibly rewritten tokens. + Returns index of the first token of that argument, or None if + there is not that many arguments. - The input token list may be mutated as part of processing. However, - its changes do not necessarily match the output token stream. + Assumes that token[i + 1] is '('. - REMEMBER TO CHANGE ``BYTECODEHEADER`` WHEN CHANGING THIS FUNCTION - OR CACHED FILES WON'T GET INVALIDATED PROPERLY. 
""" - futureimpline = False - - # The following utility functions access the tokens list and i index of - # the for i, t enumerate(tokens) loop below - def _isop(j, *o): - """Assert that tokens[j] is an OP with one of the given values""" - try: - return tokens[j].type == token.OP and tokens[j].string in o - except IndexError: - return False - - def _findargnofcall(n): - """Find arg n of a call expression (start at 0) - - Returns index of the first token of that argument, or None if - there is not that many arguments. - - Assumes that token[i + 1] is '('. + nested = 0 + for j in range(i + 2, len(tokens)): + if _isop(j, ')', ']', '}'): + # end of call, tuple, subscription or dict / set + nested -= 1 + if nested < 0: + return None + elif n == 0: + # this is the starting position of arg + return j + elif _isop(j, '(', '[', '{'): + nested += 1 + elif _isop(j, ',') and nested == 0: + n -= 1 - """ - nested = 0 - for j in range(i + 2, len(tokens)): - if _isop(j, ')', ']', '}'): - # end of call, tuple, subscription or dict / set - nested -= 1 - if nested < 0: - return None - elif n == 0: - # this is the starting position of arg - return j - elif _isop(j, '(', '[', '{'): - nested += 1 - elif _isop(j, ',') and nested == 0: - n -= 1 + return None + + def _ensuresysstr(j): + """Make sure the token at j is a system string + + Remember the given token so the string transformer won't add + the byte prefix. - return None + Ignores tokens that are not strings. Assumes bounds checking has + already been done. - def _ensureunicode(j): - """Make sure the token at j is a unicode string + """ + st = tokens[j] + if st.type == token.STRING and st.string.startswith(("'", '"')): + sysstrtokens.add(st) - This rewrites a string token to include the unicode literal prefix - so the string transformer won't add the byte prefix. - - Ignores tokens that are not strings. Assumes bounds checking has - already been done. + coldelta = 0 # column increment for new opening parens + coloffset = -1 # column offset for the current line (-1: TBD) + parens = [(0, 0, 0)] # stack of (line, end-column, column-offset) + for i, t in enumerate(tokens): + # Compute the column offset for the current line, such that + # the current line will be aligned to the last opening paren + # as before. + if coloffset < 0: + if t.start[1] == parens[-1][1]: + coloffset = parens[-1][2] + elif t.start[1] + 1 == parens[-1][1]: + # fix misaligned indent of s/util.Abort/error.Abort/ + coloffset = parens[-1][2] + (parens[-1][1] - t.start[1]) + else: + coloffset = 0 - """ - st = tokens[j] - if st.type == token.STRING and st.string.startswith(("'", '"')): - tokens[j] = st._replace(string='u%s' % st.string) - - for i, t in enumerate(tokens): - # Convert most string literals to byte literals. String literals - # in Python 2 are bytes. String literals in Python 3 are unicode. - # Most strings in Mercurial are bytes and unicode strings are rare. - # Rather than rewrite all string literals to use ``b''`` to indicate - # byte strings, we apply this token transformer to insert the ``b`` - # prefix nearly everywhere. - if t.type == token.STRING: - s = t.string + # Reset per-line attributes at EOL. + if t.type in (token.NEWLINE, tokenize.NL): + yield adjusttokenpos(t, coloffset) + coldelta = 0 + coloffset = -1 + continue - # Preserve docstrings as string literals. This is inconsistent - # with regular unprefixed strings. 
However, the - # "from __future__" parsing (which allows a module docstring to - # exist before it) doesn't properly handle the docstring if it - # is b''' prefixed, leading to a SyntaxError. We leave all - # docstrings as unprefixed to avoid this. This means Mercurial - # components touching docstrings need to handle unicode, - # unfortunately. - if s[0:3] in ("'''", '"""'): - yield t - continue + # Remember the last paren position. + if _isop(i, '(', '[', '{'): + parens.append(t.end + (coloffset + coldelta,)) + elif _isop(i, ')', ']', '}'): + parens.pop() - # If the first character isn't a quote, it is likely a string - # prefixing character (such as 'b', 'u', or 'r'. Ignore. - if s[0] not in ("'", '"'): - yield t - continue + # Convert most string literals to byte literals. String literals + # in Python 2 are bytes. String literals in Python 3 are unicode. + # Most strings in Mercurial are bytes and unicode strings are rare. + # Rather than rewrite all string literals to use ``b''`` to indicate + # byte strings, we apply this token transformer to insert the ``b`` + # prefix nearly everywhere. + if t.type == token.STRING and t not in sysstrtokens: + s = t.string - # String literal. Prefix to make a b'' string. - yield t._replace(string='b%s' % t.string) + # Preserve docstrings as string literals. This is inconsistent + # with regular unprefixed strings. However, the + # "from __future__" parsing (which allows a module docstring to + # exist before it) doesn't properly handle the docstring if it + # is b''' prefixed, leading to a SyntaxError. We leave all + # docstrings as unprefixed to avoid this. This means Mercurial + # components touching docstrings need to handle unicode, + # unfortunately. + if s[0:3] in ("'''", '"""'): + yield adjusttokenpos(t, coloffset) continue - # Insert compatibility imports at "from __future__ import" line. - # No '\n' should be added to preserve line numbers. - if (t.type == token.NAME and t.string == 'import' and - all(u.type == token.NAME for u in tokens[i - 2:i]) and - [u.string for u in tokens[i - 2:i]] == ['from', '__future__']): - futureimpline = True - if t.type == token.NEWLINE and futureimpline: - futureimpline = False - if fullname == 'mercurial.pycompat': - yield t - continue - r, c = t.start - l = (b'; from mercurial.pycompat import ' - b'delattr, getattr, hasattr, setattr, xrange, ' - b'open, unicode\n') - for u in tokenize.tokenize(io.BytesIO(l).readline): - if u.type in (tokenize.ENCODING, token.ENDMARKER): - continue - yield u._replace( - start=(r, c + u.start[1]), end=(r, c + u.end[1])) + # If the first character isn't a quote, it is likely a string + # prefixing character (such as 'b', 'u', or 'r'. Ignore. + if s[0] not in ("'", '"'): + yield adjusttokenpos(t, coloffset) + continue + + # String literal. Prefix to make a b'' string. + yield adjusttokenpos(t._replace(string='b%s' % t.string), + coloffset) + coldelta += 1 + continue + + # This looks like a function call. + if t.type == token.NAME and _isop(i + 1, '('): + fn = t.string + + # *attr() builtins don't accept byte strings to 2nd argument. + if (fn in ('getattr', 'setattr', 'hasattr', 'safehasattr') and + not _isop(i - 1, '.')): + arg1idx = _findargnofcall(1) + if arg1idx is not None: + _ensuresysstr(arg1idx) + + # .encode() and .decode() on str/bytes/unicode don't accept + # byte strings on Python 3. 
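+            # (illustration: on Python 3, 'x'.encode('ascii') is fine,
+            # but 'x'.encode(b'ascii') raises TypeError, so these
+            # arguments must stay native str)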
+ elif fn in ('encode', 'decode') and _isop(i - 1, '.'): + for argn in range(2): + argidx = _findargnofcall(argn) + if argidx is not None: + _ensuresysstr(argidx) + + # It changes iteritems/values to items/values as they are not + # present in Python 3 world. + elif opts['dictiter'] and fn in ('iteritems', 'itervalues'): + yield adjusttokenpos(t._replace(string=fn[4:]), coloffset) continue - # This looks like a function call. - if t.type == token.NAME and _isop(i + 1, '('): - fn = t.string - - # *attr() builtins don't accept byte strings to 2nd argument. - if (fn in ('getattr', 'setattr', 'hasattr', 'safehasattr') and - not _isop(i - 1, '.')): - arg1idx = _findargnofcall(1) - if arg1idx is not None: - _ensureunicode(arg1idx) - - # .encode() and .decode() on str/bytes/unicode don't accept - # byte strings on Python 3. - elif fn in ('encode', 'decode') and _isop(i - 1, '.'): - for argn in range(2): - argidx = _findargnofcall(argn) - if argidx is not None: - _ensureunicode(argidx) - - # It changes iteritems/values to items/values as they are not - # present in Python 3 world. - elif fn in ('iteritems', 'itervalues'): - yield t._replace(string=fn[4:]) - continue + # Emit unmodified token. + yield adjusttokenpos(t, coloffset) - # Emit unmodified token. - yield t - - # Header to add to bytecode files. This MUST be changed when - # ``replacetoken`` or any mechanism that changes semantics of module - # loading is changed. Otherwise cached bytecode may get loaded without - # the new transformation mechanisms applied. - BYTECODEHEADER = b'HG\x00\x0a' - - class hgloader(importlib.machinery.SourceFileLoader): - """Custom module loader that transforms source code. +def process(fin, fout, opts): + tokens = tokenize.tokenize(fin.readline) + tokens = replacetokens(list(tokens), opts) + fout.write(tokenize.untokenize(tokens)) - When the source code is converted to a code object, we transform - certain patterns to be Python 3 compatible. This allows us to write code - that is natively Python 2 and compatible with Python 3 without - making the code excessively ugly. - - We do this by transforming the token stream between parse and compile. - - Implementing transformations invalidates caching assumptions made - by the built-in importer. The built-in importer stores a header on - saved bytecode files indicating the Python/bytecode version. If the - version changes, the cached bytecode is ignored. The Mercurial - transformations could change at any time. This means we need to check - that cached bytecode was generated with the current transformation - code or there could be a mismatch between cached bytecode and what - would be generated from this class. +def tryunlink(fname): + try: + os.unlink(fname) + except OSError as err: + if err.errno != errno.ENOENT: + raise - We supplement the bytecode caching layer by wrapping ``get_data`` - and ``set_data``. These functions are called when the - ``SourceFileLoader`` retrieves and saves bytecode cache files, - respectively. We simply add an additional header on the file. As - long as the version in this file is changed when semantics change, - cached bytecode should be invalidated when transformations change. - - The added header has the form ``HG``. That is a literal - ``HG`` with 2 binary bytes indicating the transformation version. 
- """ - def get_data(self, path): - data = super(hgloader, self).get_data(path) - - if not path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)): - return data - - # There should be a header indicating the Mercurial transformation - # version. If it doesn't exist or doesn't match the current version, - # we raise an OSError because that is what - # ``SourceFileLoader.get_code()`` expects when loading bytecode - # paths to indicate the cached file is "bad." - if data[0:2] != b'HG': - raise OSError('no hg header') - if data[0:4] != BYTECODEHEADER: - raise OSError('hg header version mismatch') +@contextlib.contextmanager +def editinplace(fname): + n = os.path.basename(fname) + d = os.path.dirname(fname) + fp = tempfile.NamedTemporaryFile(prefix='.%s-' % n, suffix='~', dir=d, + delete=False) + try: + yield fp + fp.close() + if os.name == 'nt': + tryunlink(fname) + os.rename(fp.name, fname) + finally: + fp.close() + tryunlink(fp.name) - return data[4:] - - def set_data(self, path, data, *args, **kwargs): - if path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)): - data = BYTECODEHEADER + data - - return super(hgloader, self).set_data(path, data, *args, **kwargs) +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('-i', '--inplace', action='store_true', default=False, + help='edit files in place') + ap.add_argument('--dictiter', action='store_true', default=False, + help='rewrite iteritems() and itervalues()'), + ap.add_argument('files', metavar='FILE', nargs='+', help='source file') + args = ap.parse_args() + opts = { + 'dictiter': args.dictiter, + } + for fname in args.files: + if args.inplace: + with editinplace(fname) as fout: + with open(fname, 'rb') as fin: + process(fin, fout, opts) + else: + with open(fname, 'rb') as fin: + fout = sys.stdout.buffer + process(fin, fout, opts) - def source_to_code(self, data, path): - """Perform token transformation before compilation.""" - buf = io.BytesIO(data) - tokens = tokenize.tokenize(buf.readline) - data = tokenize.untokenize(replacetokens(list(tokens), self.name)) - # Python's built-in importer strips frames from exceptions raised - # for this code. Unfortunately, that mechanism isn't extensible - # and our frame will be blamed for the import failure. There - # are extremely hacky ways to do frame stripping. We haven't - # implemented them because they are very ugly. - return super(hgloader, self).source_to_code(data, path) - - # We automagically register our custom importer as a side-effect of - # loading. This is necessary to ensure that any entry points are able - # to import mercurial.* modules without having to perform this - # registration themselves. - if not any(isinstance(x, hgpathentryfinder) for x in sys.meta_path): - # meta_path is used before any implicit finders and before sys.path. 
- sys.meta_path.insert(0, hgpathentryfinder()) +if __name__ == '__main__': + main() diff --git a/contrib/check-code.py b/contrib/check-code.py --- a/contrib/check-code.py +++ b/contrib/check-code.py @@ -340,7 +340,8 @@ pypats = [ (r'\butil\.Abort\b', "directly use error.Abort"), (r'^@(\w*\.)?cachefunc', "module-level @cachefunc is risky, please avoid"), (r'^import atexit', "don't use atexit, use ui.atexit"), - (r'^import Queue', "don't use Queue, use util.queue + util.empty"), + (r'^import Queue', "don't use Queue, use pycompat.queue.Queue + " + "pycompat.queue.Empty"), (r'^import cStringIO', "don't use cStringIO.StringIO, use util.stringio"), (r'^import urllib', "don't use urllib, use util.urlreq/util.urlerr"), (r'^import SocketServer', "don't use SockerServer, use util.socketserver"), diff --git a/contrib/dockerlib.sh b/contrib/dockerlib.sh deleted file mode 100644 --- a/contrib/dockerlib.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh -eu - -# This function exists to set up the DOCKER variable and verify that -# it's the binary we expect. It also verifies that the docker service -# is running on the system and we can talk to it. -function checkdocker() { - if which docker.io >> /dev/null 2>&1 ; then - DOCKER=docker.io - elif which docker >> /dev/null 2>&1 ; then - DOCKER=docker - else - echo "Error: docker must be installed" - exit 1 - fi - - $DOCKER -h 2> /dev/null | grep -q Jansens && { echo "Error: $DOCKER is the Docking System Tray - install docker.io instead"; exit 1; } - $DOCKER version | grep -Eq "^Client( version)?:" || { echo "Error: unexpected output from \"$DOCKER version\""; exit 1; } - $DOCKER version | grep -Eq "^Server( version)?:" || { echo "Error: could not get docker server version - check it is running and your permissions"; exit 1; } -} - -# Construct a container and leave its name in $CONTAINER for future use. -function initcontainer() { - [ "$1" ] || { echo "Error: platform name must be specified"; exit 1; } - - DFILE="$ROOTDIR/contrib/docker/$1" - [ -f "$DFILE" ] || { echo "Error: docker file $DFILE not found"; exit 1; } - - CONTAINER="hg-dockerrpm-$1" - DBUILDUSER=build - ( - cat $DFILE - if [ $(uname) = "Darwin" ] ; then - # The builder is using boot2docker on OS X, so we're going to - # *guess* the uid of the user inside the VM that is actually - # running docker. This is *very likely* to fail at some point. - echo RUN useradd $DBUILDUSER -u 1000 - else - echo RUN groupadd $DBUILDUSER -g `id -g` -o - echo RUN useradd $DBUILDUSER -u `id -u` -g $DBUILDUSER -o - fi - ) | $DOCKER build --build-arg http_proxy --build-arg https_proxy --tag $CONTAINER - -} diff --git a/contrib/fixpax.py b/contrib/fixpax.py deleted file mode 100755 --- a/contrib/fixpax.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python -# fixpax - fix ownership in bdist_mpkg output -# -# Copyright 2015 Matt Mackall -# -# This software may be used and distributed according to the terms of the -# MIT license (http://opensource.org/licenses/MIT) - -"""Set file ownership to 0 in an Archive.pax.gz. 
-Suitable for fixing files bdist_mpkg output: -*.mpkg/Contents/Packages/*.pkg/Contents/Archive.pax.gz -""" - -from __future__ import absolute_import, print_function -import gzip -import os -import sys - -def fixpax(iname, oname): - i = gzip.GzipFile(iname) - o = gzip.GzipFile(oname, "w") - - while True: - magic = i.read(6) - dev = i.read(6) - ino = i.read(6) - mode = i.read(6) - i.read(6) # uid - i.read(6) # gid - nlink = i.read(6) - rdev = i.read(6) - mtime = i.read(11) - namesize = i.read(6) - filesize = i.read(11) - name = i.read(int(namesize, 8)) - data = i.read(int(filesize, 8)) - - o.write(magic) - o.write(dev) - o.write(ino) - o.write(mode) - o.write("000000") - o.write("000000") - o.write(nlink) - o.write(rdev) - o.write(mtime) - o.write(namesize) - o.write(filesize) - o.write(name) - o.write(data) - - if name.startswith("TRAILER!!!"): - o.write(i.read()) - break - - o.close() - i.close() - -if __name__ == '__main__': - for iname in sys.argv[1:]: - print('fixing file ownership in %s' % iname) - oname = sys.argv[1] + '.tmp' - fixpax(iname, oname) - os.rename(oname, iname) diff --git a/contrib/fuzz/Makefile b/contrib/fuzz/Makefile --- a/contrib/fuzz/Makefile +++ b/contrib/fuzz/Makefile @@ -1,40 +1,81 @@ +CC = clang +CXX = clang++ + +all: bdiff mpatch xdiff + +fuzzutil.o: fuzzutil.cc fuzzutil.h + $(CXX) $(CXXFLAGS) -g -O1 -fsanitize=fuzzer-no-link,address \ + -std=c++17 \ + -I../../mercurial -c -o fuzzutil.o fuzzutil.cc + +fuzzutil-oss-fuzz.o: fuzzutil.cc fuzzutil.h + $(CXX) $(CXXFLAGS) -std=c++17 \ + -I../../mercurial -c -o fuzzutil-oss-fuzz.o fuzzutil.cc + bdiff.o: ../../mercurial/bdiff.c - clang -g -O1 -fsanitize=fuzzer-no-link,address -c -o bdiff.o \ + $(CC) $(CFLAGS) -fsanitize=fuzzer-no-link,address -c -o bdiff.o \ ../../mercurial/bdiff.c -bdiff: bdiff.cc bdiff.o - clang -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \ - -I../../mercurial bdiff.cc bdiff.o -o bdiff +bdiff: bdiff.cc bdiff.o fuzzutil.o + $(CXX) $(CXXFLAGS) -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \ + -std=c++17 \ + -I../../mercurial bdiff.cc bdiff.o fuzzutil.o -o bdiff bdiff-oss-fuzz.o: ../../mercurial/bdiff.c - $$CC $$CFLAGS -c -o bdiff-oss-fuzz.o ../../mercurial/bdiff.c + $(CC) $(CFLAGS) -c -o bdiff-oss-fuzz.o ../../mercurial/bdiff.c + +bdiff_fuzzer: bdiff.cc bdiff-oss-fuzz.o fuzzutil-oss-fuzz.o + $(CXX) $(CXXFLAGS) -std=c++17 -I../../mercurial bdiff.cc \ + bdiff-oss-fuzz.o fuzzutil-oss-fuzz.o -lFuzzingEngine -o \ + $$OUT/bdiff_fuzzer + +mpatch.o: ../../mercurial/mpatch.c + $(CC) -g -O1 -fsanitize=fuzzer-no-link,address -c -o mpatch.o \ + ../../mercurial/mpatch.c -bdiff_fuzzer: bdiff.cc bdiff-oss-fuzz.o - $$CXX $$CXXFLAGS -std=c++11 -I../../mercurial bdiff.cc \ - bdiff-oss-fuzz.o -lFuzzingEngine -o $$OUT/bdiff_fuzzer +mpatch: CXXFLAGS += -std=c++17 +mpatch: mpatch.cc mpatch.o fuzzutil.o + $(CXX) $(CXXFLAGS) -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \ + -I../../mercurial mpatch.cc mpatch.o fuzzutil.o -o mpatch + +mpatch-oss-fuzz.o: ../../mercurial/mpatch.c + $(CC) $(CFLAGS) -c -o mpatch-oss-fuzz.o ../../mercurial/mpatch.c + +mpatch_fuzzer: mpatch.cc mpatch-oss-fuzz.o fuzzutil-oss-fuzz.o + $(CXX) $(CXXFLAGS) -std=c++17 -I../../mercurial mpatch.cc \ + mpatch-oss-fuzz.o fuzzutil-oss-fuzz.o -lFuzzingEngine -o \ + $$OUT/mpatch_fuzzer + +mpatch_corpus.zip: + python mpatch_corpus.py $$OUT/mpatch_fuzzer_seed_corpus.zip x%.o: ../../mercurial/thirdparty/xdiff/x%.c ../../mercurial/thirdparty/xdiff/*.h - clang -g -O1 -fsanitize=fuzzer-no-link,address -c 
\
+	$(CC) -g -O1 -fsanitize=fuzzer-no-link,address -c \
 	  -o $@ \
 	  $<

-xdiff: xdiff.cc xdiffi.o xprepare.o xutils.o
-	clang -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \
+xdiff: CXXFLAGS += -std=c++17
+xdiff: xdiff.cc xdiffi.o xprepare.o xutils.o fuzzutil.o
+	$(CXX) $(CXXFLAGS) -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \
 	  -I../../mercurial xdiff.cc \
-	  xdiffi.o xprepare.o xutils.o -o xdiff
+	  xdiffi.o xprepare.o xutils.o fuzzutil.o -o xdiff

 fuzz-x%.o: ../../mercurial/thirdparty/xdiff/x%.c ../../mercurial/thirdparty/xdiff/*.h
-	$$CC $$CFLAGS -c \
+	$(CC) $(CFLAGS) -c \
 	  -o $@ \
 	  $<

-xdiff_fuzzer: xdiff.cc fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o
-	$$CXX $$CXXFLAGS -std=c++11 -I../../mercurial xdiff.cc \
-	  fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o \
+xdiff_fuzzer: xdiff.cc fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o fuzzutil-oss-fuzz.o
+	$(CXX) $(CXXFLAGS) -std=c++17 -I../../mercurial xdiff.cc \
+	  fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o fuzzutil-oss-fuzz.o \
 	  -lFuzzingEngine -o $$OUT/xdiff_fuzzer

-all: bdiff xdiff
+clean:
+	$(RM) *.o *_fuzzer \
+	  bdiff \
+	  mpatch \
+	  xdiff

-oss-fuzz: bdiff_fuzzer xdiff_fuzzer
+oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer

-.PHONY: all oss-fuzz
+.PHONY: all clean oss-fuzz
diff --git a/contrib/fuzz/bdiff.cc b/contrib/fuzz/bdiff.cc
--- a/contrib/fuzz/bdiff.cc
+++ b/contrib/fuzz/bdiff.cc
@@ -6,30 +6,25 @@
  * This software may be used and distributed according to the terms of
  * the GNU General Public License, incorporated herein by reference.
  */
+#include <memory>
 #include <stdlib.h>

+#include "fuzzutil.h"
+
 extern "C" {

 #include "bdiff.h"

 int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
 {
-	if (!Size) {
+	auto maybe_inputs = SplitInputs(Data, Size);
+	if (!maybe_inputs) {
 		return 0;
 	}
-	// figure out a random point in [0, Size] to split our input.
-	size_t split = Data[0] / 255.0 * Size;
-
-	// left input to diff is data[1:split]
-	const uint8_t *left = Data + 1;
-	// which has len split-1
-	size_t left_size = split - 1;
-	// right starts at the next byte after left ends
-	const uint8_t *right = left + left_size;
-	size_t right_size = Size - split;
+	auto inputs = std::move(maybe_inputs.value());
 	struct bdiff_line *a, *b;
-	int an = bdiff_splitlines((const char *)left, split - 1, &a);
-	int bn = bdiff_splitlines((const char *)right, right_size, &b);
+	int an = bdiff_splitlines(inputs.left.get(), inputs.left_size, &a);
+	int bn = bdiff_splitlines(inputs.right.get(), inputs.right_size, &b);
 	struct bdiff_hunk l;
 	bdiff_diff(a, an, b, bn, &l);
 	free(a);
diff --git a/contrib/fuzz/fuzzutil.cc b/contrib/fuzz/fuzzutil.cc
new file mode 100644
--- /dev/null
+++ b/contrib/fuzz/fuzzutil.cc
@@ -0,0 +1,27 @@
+#include "fuzzutil.h"
+
+#include <cstring>
+#include <utility>
+
+contrib::optional<two_inputs> SplitInputs(const uint8_t *Data, size_t Size)
+{
+	if (!Size) {
+		return contrib::nullopt;
+	}
+	// figure out a random point in [0, Size] to split our input.
+	size_t left_size = (Data[0] / 255.0) * (Size - 1);
+
+	// Copy inputs to new allocations so if bdiff over-reads
+	// AddressSanitizer can detect it.
+	std::unique_ptr<char[]> left(new char[left_size]);
+	std::memcpy(left.get(), Data + 1, left_size);
+	// right starts at the next byte after left ends
+	size_t right_size = Size - (left_size + 1);
+	std::unique_ptr<char[]> right(new char[right_size]);
+	std::memcpy(right.get(), Data + 1 + left_size, right_size);
+	LOG(2) << "inputs are " << left_size << " and " << right_size
+	       << " bytes" << std::endl;
+	two_inputs result = {std::move(right), right_size, std::move(left),
+	                     left_size};
+	return result;
+}
diff --git a/contrib/fuzz/fuzzutil.h b/contrib/fuzz/fuzzutil.h
new file mode 100644
--- /dev/null
+++ b/contrib/fuzz/fuzzutil.h
@@ -0,0 +1,47 @@
+#ifndef CONTRIB_FUZZ_FUZZUTIL_H
+#define CONTRIB_FUZZ_FUZZUTIL_H
+#include <iostream>
+#include <memory>
+#include <stdint.h>
+
+/* Try and use std::optional, but failing that assume we'll have a
+ * workable https://abseil.io/ install on the include path to get
+ * their backport of std::optional. */
+#ifdef __has_include
+#if __has_include(<optional>) && __cplusplus >= 201703L
+#include <optional>
+#define CONTRIB_FUZZ_HAVE_STD_OPTIONAL
+#endif
+#endif
+#ifdef CONTRIB_FUZZ_HAVE_STD_OPTIONAL
+namespace contrib
+{
+using std::nullopt;
+using std::optional;
+} /* namespace contrib */
+#else
+#include "third_party/absl/types/optional.h"
+namespace contrib
+{
+using absl::nullopt;
+using absl::optional;
+} /* namespace contrib */
+#endif
+
+/* set DEBUG to 1 for a few debugging prints, or 2 for a lot */
+#define DEBUG 0
+#define LOG(level)                                                            \
+	if (level <= DEBUG)                                                   \
+	std::cout
+
+struct two_inputs {
+	std::unique_ptr<char[]> right;
+	size_t right_size;
+	std::unique_ptr<char[]> left;
+	size_t left_size;
+};
+
+/* Split a non-zero-length input into two inputs. */
+contrib::optional<two_inputs> SplitInputs(const uint8_t *Data, size_t Size);
+
+#endif /* CONTRIB_FUZZ_FUZZUTIL_H */
diff --git a/contrib/fuzz/mpatch.cc b/contrib/fuzz/mpatch.cc
new file mode 100644
--- /dev/null
+++ b/contrib/fuzz/mpatch.cc
@@ -0,0 +1,122 @@
+/*
+ * mpatch.cc - fuzzer harness for mpatch.c
+ *
+ * Copyright 2018, Google Inc.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License, incorporated herein by reference.
+ */
+#include <memory>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <vector>
+
+#include "fuzzutil.h"
+
+// To avoid having too many OOMs from the fuzzer infrastructure, we'll
+// skip patch application if the resulting fulltext would be bigger
+// than 10MiB.
+#define MAX_OUTPUT_SIZE 10485760
+
+extern "C" {
+#include "bitmanipulation.h"
+#include "mpatch.h"
+
+struct mpatchbin {
+	std::unique_ptr<char[]> data;
+	size_t len;
+};
+
+static mpatch_flist *getitem(void *vbins, ssize_t pos)
+{
+	std::vector<mpatchbin> *bins = (std::vector<mpatchbin> *)vbins;
+	const mpatchbin &bin = bins->at(pos + 1);
+	struct mpatch_flist *res;
+	LOG(2) << "mpatch_decode " << bin.len << std::endl;
+	if (mpatch_decode(bin.data.get(), bin.len, &res) < 0)
+		return NULL;
+	return res;
+}
+
+// input format:
+// u8 number of inputs
+// one u16 for each input, its length
+// the inputs
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+	if (!Size) {
+		return 0;
+	}
+	// First byte of data is how many texts we expect, first text
+	// being the base the rest being the deltas.
+	ssize_t numtexts = Data[0];
+	if (numtexts < 2) {
+		// No point if we don't have at least a base text and a delta...
+		return 0;
+	}
+	// Each text will be described by a byte for how long it
+	// should be, so give up if we don't have enough.
+	if ((Size - 1) < (numtexts * 2)) {
+		return 0;
+	}
+	size_t consumed = 1 + (numtexts * 2);
+	LOG(2) << "input contains " << Size << std::endl;
+	LOG(2) << numtexts << " texts, consuming " << consumed << std::endl;
+	std::vector<mpatchbin> bins;
+	bins.reserve(numtexts);
+	for (int i = 0; i < numtexts; ++i) {
+		mpatchbin bin;
+		size_t nthsize = getbeuint16((char *)Data + 1 + (2 * i));
+		LOG(2) << "text " << i << " is " << nthsize << std::endl;
+		char *start = (char *)Data + consumed;
+		consumed += nthsize;
+		if (consumed > Size) {
+			LOG(2) << "ran out of data, consumed " << consumed
+			       << " of " << Size << std::endl;
+			return 0;
+		}
+		bin.len = nthsize;
+		bin.data.reset(new char[nthsize]);
+		memcpy(bin.data.get(), start, nthsize);
+		bins.push_back(std::move(bin));
+	}
+	LOG(2) << "mpatch_flist" << std::endl;
+	struct mpatch_flist *patch =
+	    mpatch_fold(&bins, getitem, 0, numtexts - 1);
+	if (!patch) {
+		return 0;
+	}
+	LOG(2) << "mpatch_calcsize" << std::endl;
+	ssize_t outlen = mpatch_calcsize(bins[0].len, patch);
+	LOG(2) << "outlen " << outlen << std::endl;
+	if (outlen < 0 || outlen > MAX_OUTPUT_SIZE) {
+		goto cleanup;
+	}
+	{
+		char *dest = (char *)malloc(outlen);
+		LOG(2) << "expecting " << outlen << " total bytes at "
+		       << (void *)dest << std::endl;
+		mpatch_apply(dest, bins[0].data.get(), bins[0].len, patch);
+		free(dest);
+		LOG(1) << "applied a complete patch" << std::endl;
+	}
+cleanup:
+	mpatch_lfree(patch);
+	return 0;
+}
+
+#ifdef HG_FUZZER_INCLUDE_MAIN
+int main(int argc, char **argv)
+{
+	// One text, one patch.
+	const char data[] = "\x02\x00\x01\x00\x0d"
+	                    // base text
+	                    "a"
+	                    // binary delta that will append a single b
+	                    "\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01b";
+	return LLVMFuzzerTestOneInput((const uint8_t *)data, 19);
+}
+#endif
+
+} // extern "C"
diff --git a/contrib/fuzz/mpatch_corpus.py b/contrib/fuzz/mpatch_corpus.py
new file mode 100644
--- /dev/null
+++ b/contrib/fuzz/mpatch_corpus.py
@@ -0,0 +1,345 @@
+from __future__ import absolute_import, print_function
+
+import argparse
+import struct
+import zipfile
+
+from mercurial import (
+    hg,
+    ui as uimod,
+)
+
+ap = argparse.ArgumentParser()
+ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
+args = ap.parse_args()
+
+class deltafrag(object):
+    def __init__(self, start, end, data):
+        self.start = start
+        self.end = end
+        self.data = data
+
+    def __str__(self):
+        return struct.pack(
+            ">lll", self.start, self.end, len(self.data)) + self.data
+
+class delta(object):
+    def __init__(self, frags):
+        self.frags = frags
+
+    def __str__(self):
+        return ''.join(str(f) for f in self.frags)
+
+class corpus(object):
+
+    def __init__(self, base, deltas):
+        self.base = base
+        self.deltas = deltas
+
+    def __str__(self):
+        deltas = [str(d) for d in self.deltas]
+        parts = (
+            [
+                struct.pack(">B", len(deltas) + 1),
+                struct.pack(">H", len(self.base)),
+            ]
+            + [struct.pack(">H", len(d)) for d in deltas]
+            + [self.base]
+            + deltas
+        )
+        return "".join(parts)
+
+with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
+    # Manually constructed entries
+    zf.writestr(
+        "one_delta_applies",
+        str(corpus('a', [delta([deltafrag(0, 1, 'b')])]))
+    )
+    zf.writestr(
+        "one_delta_starts_late",
+        str(corpus('a', [delta([deltafrag(3, 1, 'b')])]))
+    )
+    zf.writestr(
+        "one_delta_ends_late",
+        str(corpus('a', [delta([deltafrag(0, 20, 'b')])]))
+    )
+
+    try:
+        # Generated from repo data
+        r = hg.repository(uimod.ui(), '../..')
+        fl = r.file('mercurial/manifest.py')
+        rl = getattr(fl, '_revlog', fl)
+        bins = 
rl._chunks(rl._deltachain(10)[0]) + zf.writestr('manifest_py_rev_10', + str(corpus(bins[0], bins[1:]))) + except: # skip this, so no re-raises + print('skipping seed file from repo data') + # Automatically discovered by running the fuzzer + zf.writestr( + "mpatch_decode_old_overread", "\x02\x00\x00\x00\x02\x00\x00\x00" + ) + # https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=8876 + zf.writestr( + "mpatch_ossfuzz_getbe32_ubsan", + "\x02\x00\x00\x00\x0c \xff\xff\xff\xff ") + zf.writestr( + "mpatch_apply_over_memcpy", + '\x13\x01\x00\x05\xd0\x00\x00\x00\x00\x00\x00\x00\x00\n \x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x8c\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00)\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + 
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00A\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + 
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x94\x18' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + 
'\x00\x00\x00\x00\x00\x00\x00\x00\xff\xfa\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x94\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + 
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xfa\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + 
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00]\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00se\x00\x00' + '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + '\x00\x00\x00\x00') diff --git a/contrib/fuzz/xdiff.cc b/contrib/fuzz/xdiff.cc --- a/contrib/fuzz/xdiff.cc +++ b/contrib/fuzz/xdiff.cc @@ -10,6 +10,8 @@ #include #include +#include "fuzzutil.h" + extern "C" { int hunk_consumer(long a1, long a2, long b1, long b2, void *priv) @@ -20,21 +22,17 @@ int hunk_consumer(long a1, long a2, long int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { - if (!Size) { + auto maybe_inputs = SplitInputs(Data, Size); + if (!maybe_inputs) { return 0; } - // figure out a random point in [0, Size] to split our input. 
-	size_t split = Data[0] / 255.0 * Size;
-
+	auto inputs = std::move(maybe_inputs.value());
 	mmfile_t a, b;
 
-	// `a` input to diff is data[1:split]
-	a.ptr = (char *)Data + 1;
-	// which has len split-1
-	a.size = split - 1;
-	// `b` starts at the next byte after `a` ends
-	b.ptr = a.ptr + a.size;
-	b.size = Size - split;
+	a.ptr = inputs.left.get();
+	a.size = inputs.left_size;
+	b.ptr = inputs.right.get();
+	b.size = inputs.right_size;
 	xpparam_t xpp = {
 		XDF_INDENT_HEURISTIC, /* flags */
 	};
diff --git a/contrib/genosxversion.py b/contrib/genosxversion.py
--- a/contrib/genosxversion.py
+++ b/contrib/genosxversion.py
@@ -117,9 +117,9 @@ def main(argv):
         return
     with open(opts.versionfile) as f:
         for l in f:
-            if l.startswith('version = '):
+            if l.startswith('version = b'):
                 # version number is entire line minus the quotes
-                ver = l[len('version = ') + 1:-2]
+                ver = l[len('version = b') + 1:-2]
                 break
     if opts.paranoid:
         print(paranoidver(ver))
diff --git a/contrib/hg-ssh b/contrib/hg-ssh
--- a/contrib/hg-ssh
+++ b/contrib/hg-ssh
@@ -39,10 +39,14 @@ import hgdemandimport ; hgdemandimport.e
 from mercurial import (
     dispatch,
+    pycompat,
     ui as uimod,
 )
 
 def main():
+    # Prevent insertion/deletion of CRs
+    dispatch.initstdio()
+
     cwd = os.getcwd()
     readonly = False
     args = sys.argv[1:]
@@ -66,15 +70,15 @@ def main():
         path = cmdargv[2]
         repo = os.path.normpath(os.path.join(cwd, os.path.expanduser(path)))
         if repo in allowed_paths:
-            cmd = ['-R', repo, 'serve', '--stdio']
+            cmd = [b'-R', pycompat.fsencode(repo), b'serve', b'--stdio']
             req = dispatch.request(cmd)
             if readonly:
                 if not req.ui:
                     req.ui = uimod.ui.load()
-                req.ui.setconfig('hooks', 'pretxnopen.hg-ssh',
-                                 'python:__main__.rejectpush', 'hg-ssh')
-                req.ui.setconfig('hooks', 'prepushkey.hg-ssh',
-                                 'python:__main__.rejectpush', 'hg-ssh')
+                req.ui.setconfig(b'hooks', b'pretxnopen.hg-ssh',
+                                 b'python:__main__.rejectpush', b'hg-ssh')
+                req.ui.setconfig(b'hooks', b'prepushkey.hg-ssh',
+                                 b'python:__main__.rejectpush', b'hg-ssh')
             dispatch.dispatch(req)
         else:
             sys.stderr.write('Illegal repository "%s"\n' % repo)
@@ -84,7 +88,7 @@ def main():
     sys.exit(255)
 
 def rejectpush(ui, **kwargs):
-    ui.warn(("Permission denied\n"))
+    ui.warn((b"Permission denied\n"))
     # mercurial hooks use unix process conventions for hook return values
     # so a truthy return means failure
     return True
diff --git a/Makefile b/contrib/packaging/Makefile
copy from Makefile
copy to contrib/packaging/Makefile
--- a/Makefile
+++ b/contrib/packaging/Makefile
@@ -1,323 +1,144 @@
-# If you want to change PREFIX, do not just edit it below. The changed
-# value wont get passed on to recursive make calls. You should instead
-# override the variable on the command like:
-#
-# % make PREFIX=/opt/ install
+$(eval HGROOT := $(shell cd ../..; pwd))
+
+DEBIAN_CODENAMES := \
+  jessie \
+  stretch \
+  buster
+
+UBUNTU_CODENAMES := \
+  trusty \
+  xenial \
+  artful \
+  bionic \
 
-export PREFIX=/usr/local
-PYTHON=python
-$(eval HGROOT := $(shell pwd))
-HGPYTHONS ?= $(HGROOT)/build/pythons
-PURE=
-PYFILES:=$(shell find mercurial hgext doc -name '*.py')
-DOCFILES=mercurial/help/*.txt
-export LANGUAGE=C
-export LC_ALL=C
-TESTFLAGS ?= $(shell echo $$HGTESTFLAGS)
-OSXVERSIONFLAGS ?= $(shell echo $$OSXVERSIONFLAGS)
+FEDORA_RELEASES := \
+  20 \
+  21 \
+  28
 
-# Set this to e.g. "mingw32" to use a non-default compiler.
-COMPILER=
+CENTOS_RELEASES := \
+  5 \
+  6 \
+  7
 
-COMPILERFLAG_tmp_ =
-COMPILERFLAG_tmp_${COMPILER} ?= -c $(COMPILER)
-COMPILERFLAG=${COMPILERFLAG_tmp_${COMPILER}}
+# Build a Python for these CentOS releases.
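The comment above and the CENTOS_WITH_PYTHON_RELEASES variable that follows exist because CentOS 5 and 6 ship system Pythons older than the 2.7 Mercurial requires, so their RPM builds bundle one via buildrpm's --withpython flag. The CentOS targets later in this Makefile select the flag with $(if $(filter ...)); a rough Python sketch of that selection, for illustration only:

    # Sketch of $(if $(filter $(1),$(CENTOS_WITH_PYTHON_RELEASES)),--withpython)
    CENTOS_WITH_PYTHON_RELEASES = {'5', '6'}

    def buildrpm_args(release):
        # Only CentOS 5/6 need --withpython; their system Python predates 2.7.
        return ['--withpython'] if release in CENTOS_WITH_PYTHON_RELEASES else []

    assert buildrpm_args('5') == ['--withpython']
    assert buildrpm_args('7') == []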
+CENTOS_WITH_PYTHON_RELEASES := 5 6 help: - @echo 'Commonly used make targets:' - @echo ' all - build program and documentation' - @echo ' install - install program and man pages to $$PREFIX ($(PREFIX))' - @echo ' install-home - install with setup.py install --home=$$HOME ($(HOME))' - @echo ' local - build for inplace usage' - @echo ' tests - run all tests in the automatic test suite' - @echo ' test-foo - run only specified tests (e.g. test-merge1.t)' - @echo ' dist - run all tests and create a source tarball in dist/' - @echo ' clean - remove files created by other targets' - @echo ' (except installed files or dist source tarball)' - @echo ' update-pot - update i18n/hg.pot' - @echo - @echo 'Example for a system-wide installation under /usr/local:' - @echo ' make all && su -c "make install" && hg version' - @echo - @echo 'Example for a local installation (usable in this directory):' - @echo ' make local && ./hg version' - -all: build doc - -local: - $(PYTHON) setup.py $(PURE) \ - build_py -c -d . \ - build_ext $(COMPILERFLAG) -i \ - build_hgexe $(COMPILERFLAG) -i \ - build_mo - env HGRCPATH= $(PYTHON) hg version - -build: - $(PYTHON) setup.py $(PURE) build $(COMPILERFLAG) - -wheel: - FORCE_SETUPTOOLS=1 $(PYTHON) setup.py $(PURE) bdist_wheel $(COMPILERFLAG) - -doc: - $(MAKE) -C doc - -cleanbutpackages: - -$(PYTHON) setup.py clean --all # ignore errors from this command - find contrib doc hgext hgext3rd i18n mercurial tests hgdemandimport \ - \( -name '*.py[cdo]' -o -name '*.so' \) -exec rm -f '{}' ';' - rm -f MANIFEST MANIFEST.in hgext/__index__.py tests/*.err - rm -f mercurial/__modulepolicy__.py - if test -d .hg; then rm -f mercurial/__version__.py; fi - rm -rf build mercurial/locale - $(MAKE) -C doc clean - $(MAKE) -C contrib/chg distclean - -clean: cleanbutpackages - rm -rf packages - -install: install-bin install-doc - -install-bin: build - $(PYTHON) setup.py $(PURE) install --root="$(DESTDIR)/" --prefix="$(PREFIX)" --force - -install-doc: doc - cd doc && $(MAKE) $(MFLAGS) install - -install-home: install-home-bin install-home-doc - -install-home-bin: build - $(PYTHON) setup.py $(PURE) install --home="$(HOME)" --prefix="" --force + @echo 'Packaging Make Targets' + @echo '' + @echo 'docker-centos{$(strip $(CENTOS_RELEASES))}' + @echo ' Build an RPM for a specific CentOS version using Docker.' + @echo '' + @echo 'docker-debian-{$(strip $(DEBIAN_CODENAMES))}' + @echo ' Build Debian packages specific to a Debian distro using Docker.' + @echo '' + @echo 'docker-fedora{$(strip $(FEDORA_RELEASES))}' + @echo ' Build an RPM for a specific Fedora version using Docker.' + @echo '' + @echo 'docker-ubuntu-{$(strip $(UBUNTU_CODENAMES))}' + @echo ' Build Debian package specific to an Ubuntu distro using Docker.' + @echo '' + @echo 'docker-ubuntu-{$(strip $(UBUNTU_CODENAMES))}-ppa' + @echo ' Build a source-only Debian package specific to an Ubuntu distro' + @echo ' using Docker.' + @echo '' + @echo 'linux-wheels' + @echo ' Build Linux manylinux wheels using Docker.' 
+ @echo '' + @echo 'linux-wheels-{x86_64, i686}' + @echo ' Build Linux manylinux wheels for a specific architecture using Docker' + @echo '' + @echo 'deb' + @echo ' Build a Debian package locally targeting the current system' + @echo '' + @echo 'ppa' + @echo ' Build a Debian source package locally targeting the current system' + @echo '' + @echo 'centos{$(strip $(CENTOS_RELEASES))}' + @echo ' Build an RPM for a specific CentOS version locally' + @echo '' + @echo 'fedora{$(strip $(FEDORA_RELEASES))}' + @echo ' Build an RPM for a specific Fedora version locally' -install-home-doc: doc - cd doc && $(MAKE) $(MFLAGS) PREFIX="$(HOME)" install - -MANIFEST-doc: - $(MAKE) -C doc MANIFEST - -MANIFEST.in: MANIFEST-doc - hg manifest | sed -e 's/^/include /' > MANIFEST.in - echo include mercurial/__version__.py >> MANIFEST.in - sed -e 's/^/include /' < doc/MANIFEST >> MANIFEST.in - -dist: tests dist-notests - -dist-notests: doc MANIFEST.in - TAR_OPTIONS="--owner=root --group=root --mode=u+w,go-w,a+rX-s" $(PYTHON) setup.py -q sdist +.PHONY: help -check: tests - -tests: - cd tests && $(PYTHON) run-tests.py $(TESTFLAGS) - -test-%: - cd tests && $(PYTHON) run-tests.py $(TESTFLAGS) $@ - -testpy-%: - @echo Looking for Python $* in $(HGPYTHONS) - [ -e $(HGPYTHONS)/$*/bin/python ] || ( \ - cd $$(mktemp --directory --tmpdir) && \ - $(MAKE) -f $(HGROOT)/contrib/Makefile.python PYTHONVER=$* PREFIX=$(HGPYTHONS)/$* python ) - cd tests && $(HGPYTHONS)/$*/bin/python run-tests.py $(TESTFLAGS) - -check-code: - hg manifest | xargs python contrib/check-code.py +.PHONY: deb +deb: + ./builddeb -format-c: - clang-format --style file -i \ - `hg files 'set:(**.c or **.cc or **.h) and not "listfile:contrib/clang-format-ignorelist"'` - -update-pot: i18n/hg.pot +.PHONY: ppa +ppa: + ./builddeb --source-only -i18n/hg.pot: $(PYFILES) $(DOCFILES) i18n/posplit i18n/hggettext - $(PYTHON) i18n/hggettext mercurial/commands.py \ - hgext/*.py hgext/*/__init__.py \ - mercurial/fileset.py mercurial/revset.py \ - mercurial/templatefilters.py \ - mercurial/templatefuncs.py \ - mercurial/templatekw.py \ - mercurial/filemerge.py \ - mercurial/hgweb/webcommands.py \ - mercurial/util.py \ - $(DOCFILES) > i18n/hg.pot.tmp - # All strings marked for translation in Mercurial contain - # ASCII characters only. But some files contain string - # literals like this '\037\213'. xgettext thinks it has to - # parse them even though they are not marked for translation. - # Extracting with an explicit encoding of ISO-8859-1 will make - # xgettext "parse" and ignore them. - echo $(PYFILES) | xargs \ - xgettext --package-name "Mercurial" \ - --msgid-bugs-address "" \ - --copyright-holder "Matt Mackall and others" \ - --from-code ISO-8859-1 --join --sort-by-file --add-comments=i18n: \ - -d hg -p i18n -o hg.pot.tmp - $(PYTHON) i18n/posplit i18n/hg.pot.tmp - # The target file is not created before the last step. So it never is in - # an intermediate state. - mv -f i18n/hg.pot.tmp i18n/hg.pot +# Debian targets. +define debian_targets = +.PHONY: docker-debian-$(1) +docker-debian-$(1): + ./dockerdeb debian $(1) + +endef -%.po: i18n/hg.pot - # work on a temporary copy for never having a half completed target - cp $@ $@.tmp - msgmerge --no-location --update $@.tmp $^ - mv -f $@.tmp $@ +$(foreach codename,$(DEBIAN_CODENAMES),$(eval $(call debian_targets,$(codename)))) -# Packaging targets +# Ubuntu targets. 
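The define/$(eval $(call ...)) idiom above is dense: the $(foreach ...) stamps out one phony docker-debian-<codename> rule per entry in DEBIAN_CODENAMES. A short Python sketch of the expansion it performs (illustrative only, not part of the build):

    # What $(foreach codename,$(DEBIAN_CODENAMES),
    #         $(eval $(call debian_targets,$(codename)))) generates.
    DEBIAN_CODENAMES = ['jessie', 'stretch', 'buster']

    RULE = '.PHONY: docker-debian-{c}\ndocker-debian-{c}:\n\t./dockerdeb debian {c}\n'

    for c in DEBIAN_CODENAMES:
        print(RULE.format(c=c))

The Ubuntu, Fedora, and CentOS sections below reuse the same idiom; the Ubuntu template additionally emits a -ppa (source-only) variant per codename.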
+define ubuntu_targets = +.PHONY: docker-ubuntu-$(1) +docker-ubuntu-$(1): + ./dockerdeb ubuntu $(1) + +.PHONY: docker-ubuntu-$(1)-ppa +docker-ubuntu-$(1)-ppa: + ./dockerdeb ubuntu $(1) --source-only -osx: - rm -rf build/mercurial - /usr/bin/python2.7 setup.py install --optimize=1 \ - --root=build/mercurial/ --prefix=/usr/local/ \ - --install-lib=/Library/Python/2.7/site-packages/ - make -C doc all install DESTDIR="$(PWD)/build/mercurial/" - # Place a bogon .DS_Store file in the target dir so we can be - # sure it doesn't get included in the final package. - touch build/mercurial/.DS_Store - # install zsh completions - this location appears to be - # searched by default as of macOS Sierra. - install -d build/mercurial/usr/local/share/zsh/site-functions/ - install -m 0644 contrib/zsh_completion build/mercurial/usr/local/share/zsh/site-functions/_hg - # install bash completions - there doesn't appear to be a - # place that's searched by default for bash, so we'll follow - # the lead of Apple's git install and just put it in a - # location of our own. - install -d build/mercurial/usr/local/hg/contrib/ - install -m 0644 contrib/bash_completion build/mercurial/usr/local/hg/contrib/hg-completion.bash - make -C contrib/chg \ - HGPATH=/usr/local/bin/hg \ - PYTHON=/usr/bin/python2.7 \ - HGEXTDIR=/Library/Python/2.7/site-packages/hgext \ - DESTDIR=../../build/mercurial \ - PREFIX=/usr/local \ - clean install - mkdir -p $${OUTPUTDIR:-dist} - HGVER=$$(python contrib/genosxversion.py $(OSXVERSIONFLAGS) build/mercurial/Library/Python/2.7/site-packages/mercurial/__version__.py) && \ - OSXVER=$$(sw_vers -productVersion | cut -d. -f1,2) && \ - pkgbuild --filter \\.DS_Store --root build/mercurial/ \ - --identifier org.mercurial-scm.mercurial \ - --version "$${HGVER}" \ - build/mercurial.pkg && \ - productbuild --distribution contrib/macosx/distribution.xml \ - --package-path build/ \ - --version "$${HGVER}" \ - --resources contrib/macosx/ \ - "$${OUTPUTDIR:-dist/}"/Mercurial-"$${HGVER}"-macosx"$${OSXVER}".pkg +endef + +$(foreach codename,$(UBUNTU_CODENAMES),$(eval $(call ubuntu_targets,$(codename)))) -deb: - contrib/builddeb - -ppa: - contrib/builddeb --source-only - -contrib/docker/debian-%: contrib/docker/debian.template - sed "s/__CODENAME__/$*/" $< > $@ - -docker-debian-jessie: contrib/docker/debian-jessie - contrib/dockerdeb debian jessie - -docker-debian-stretch: contrib/docker/debian-stretch - contrib/dockerdeb debian stretch - -contrib/docker/ubuntu-%: contrib/docker/ubuntu.template - sed "s/__CODENAME__/$*/" $< > $@ - -docker-ubuntu-trusty: contrib/docker/ubuntu-trusty - contrib/dockerdeb ubuntu trusty - -docker-ubuntu-trusty-ppa: contrib/docker/ubuntu-trusty - contrib/dockerdeb ubuntu trusty --source-only +# Fedora targets. 
+define fedora_targets +.PHONY: fedora$(1) +fedora$(1): + mkdir -p $$(HGROOT)/packages/fedora$(1) + ./buildrpm + cp $$(HGROOT)/contrib/packaging/rpmbuild/RPMS/*/* $$(HGROOT)/packages/fedora$(1) + cp $$(HGROOT)/contrib/packaging/rpmbuild/SRPMS/* $$(HGROOT)/packages/fedora$(1) + rm -rf $(HGROOT)/rpmbuild -docker-ubuntu-xenial: contrib/docker/ubuntu-xenial - contrib/dockerdeb ubuntu xenial - -docker-ubuntu-xenial-ppa: contrib/docker/ubuntu-xenial - contrib/dockerdeb ubuntu xenial --source-only - -docker-ubuntu-artful: contrib/docker/ubuntu-artful - contrib/dockerdeb ubuntu artful - -docker-ubuntu-artful-ppa: contrib/docker/ubuntu-artful - contrib/dockerdeb ubuntu artful --source-only +.PHONY: docker-fedora$(1) +docker-fedora$(1): + mkdir -p $$(HGROOT)/packages/fedora$(1) + ./dockerrpm fedora$(1) -docker-ubuntu-bionic: contrib/docker/ubuntu-bionic - contrib/dockerdeb ubuntu bionic - -docker-ubuntu-bionic-ppa: contrib/docker/ubuntu-bionic - contrib/dockerdeb ubuntu bionic --source-only +endef -fedora20: - mkdir -p packages/fedora20 - contrib/buildrpm - cp rpmbuild/RPMS/*/* packages/fedora20 - cp rpmbuild/SRPMS/* packages/fedora20 - rm -rf rpmbuild - -docker-fedora20: - mkdir -p packages/fedora20 - contrib/dockerrpm fedora20 +$(foreach release,$(FEDORA_RELEASES),$(eval $(call fedora_targets,$(release)))) -fedora21: - mkdir -p packages/fedora21 - contrib/buildrpm - cp rpmbuild/RPMS/*/* packages/fedora21 - cp rpmbuild/SRPMS/* packages/fedora21 - rm -rf rpmbuild - -docker-fedora21: - mkdir -p packages/fedora21 - contrib/dockerrpm fedora21 - -centos5: - mkdir -p packages/centos5 - contrib/buildrpm --withpython - cp rpmbuild/RPMS/*/* packages/centos5 - cp rpmbuild/SRPMS/* packages/centos5 - -docker-centos5: - mkdir -p packages/centos5 - contrib/dockerrpm centos5 --withpython +# CentOS targets. 
+define centos_targets +.PHONY: centos$(1) +centos$(1): + mkdir -p $$(HGROOT)/packages/centos$(1) + ./buildrpm $$(if $$(filter $(1),$$(CENTOS_WITH_PYTHON_RELEASES)),--withpython) + cp $$(HGROOT)/rpmbuild/RPMS/*/* $$(HGROOT)/packages/centos$(1) + cp $$(HGROOT)/rpmbuild/SRPMS/* $$(HGROOT)/packages/centos$(1) -centos6: - mkdir -p packages/centos6 - contrib/buildrpm --withpython - cp rpmbuild/RPMS/*/* packages/centos6 - cp rpmbuild/SRPMS/* packages/centos6 - -docker-centos6: - mkdir -p packages/centos6 - contrib/dockerrpm centos6 --withpython +.PHONY: docker-centos$(1) +docker-centos$(1): + mkdir -p $$(HGROOT)/packages/centos$(1) + ./dockerrpm centos$(1) $$(if $$(filter $(1),$$(CENTOS_WITH_PYTHON_RELEASES)),--withpython) -centos7: - mkdir -p packages/centos7 - contrib/buildrpm - cp rpmbuild/RPMS/*/* packages/centos7 - cp rpmbuild/SRPMS/* packages/centos7 +endef -docker-centos7: - mkdir -p packages/centos7 - contrib/dockerrpm centos7 +$(foreach release,$(CENTOS_RELEASES),$(eval $(call centos_targets,$(release)))) +.PHONY: linux-wheels linux-wheels: linux-wheels-x86_64 linux-wheels-i686 +.PHONY: linux-wheels-x86_64 linux-wheels-x86_64: - docker run -e "HGTEST_JOBS=$(shell nproc)" --rm -ti -v `pwd`:/src quay.io/pypa/manylinux1_x86_64 /src/contrib/build-linux-wheels.sh - -linux-wheels-i686: - docker run -e "HGTEST_JOBS=$(shell nproc)" --rm -ti -v `pwd`:/src quay.io/pypa/manylinux1_i686 linux32 /src/contrib/build-linux-wheels.sh + docker run -e "HGTEST_JOBS=$(shell nproc)" --rm -ti -v `pwd`/../..:/src quay.io/pypa/manylinux1_x86_64 /src/contrib/packaging/build-linux-wheels.sh -.PHONY: help all local build doc cleanbutpackages clean install install-bin \ - install-doc install-home install-home-bin install-home-doc \ - dist dist-notests check tests check-code format-c update-pot \ - osx deb ppa \ - docker-debian-jessie \ - docker-debian-stretch \ - docker-ubuntu-trusty docker-ubuntu-trusty-ppa \ - docker-ubuntu-xenial docker-ubuntu-xenial-ppa \ - docker-ubuntu-artful docker-ubuntu-artful-ppa \ - docker-ubuntu-bionic docker-ubuntu-bionic-ppa \ - fedora20 docker-fedora20 \ - fedora21 docker-fedora21 \ - centos5 docker-centos5 \ - centos6 docker-centos6 \ - centos7 docker-centos7 \ - linux-wheels +.PHONY: linux-wheels-i686 +linux-wheels-i686: + docker run -e "HGTEST_JOBS=$(shell nproc)" --rm -ti -v `pwd`/../..:/src quay.io/pypa/manylinux1_i686 linux32 /src/contrib/packaging/build-linux-wheels.sh diff --git a/contrib/build-linux-wheels.sh b/contrib/packaging/build-linux-wheels.sh rename from contrib/build-linux-wheels.sh rename to contrib/packaging/build-linux-wheels.sh --- a/contrib/build-linux-wheels.sh +++ b/contrib/packaging/build-linux-wheels.sh @@ -30,5 +30,5 @@ for PYBIN in $PYTHON_TARGETS; do # Install mercurial wheel as root "${PYBIN}/pip" install mercurial --no-index -f /src/wheelhouse # But run tests as hgbuilder user (non-root) - su hgbuilder -c "\"${PYBIN}/python\" /io/tests/run-tests.py --with-hg=\"${PYBIN}/hg\" --blacklist=/io/contrib/linux-wheel-centos5-blacklist" + su hgbuilder -c "\"${PYBIN}/python\" /io/tests/run-tests.py --with-hg=\"${PYBIN}/hg\" --blacklist=/io/contrib/packaging/linux-wheel-centos5-blacklist" done diff --git a/contrib/builddeb b/contrib/packaging/builddeb rename from contrib/builddeb rename to contrib/packaging/builddeb --- a/contrib/builddeb +++ b/contrib/packaging/builddeb @@ -6,6 +6,8 @@ . $(dirname $0)/packagelib.sh +ROOTDIR=$(cd $(dirname $0)/../.. 
> /dev/null; pwd) + BUILD=1 CLEANUP=1 DISTID=`(lsb_release -is 2> /dev/null | tr '[:upper:]' '[:lower:]') || echo debian` @@ -73,7 +75,7 @@ if [ "$BUILD" ]; then exit 1 fi - cp -r "$PWD"/contrib/debian debian + cp -r "$ROOTDIR"/contrib/packaging/debian debian sed -i.tmp "s/__VERSION__/$debver/" $changelog sed -i.tmp "s/__DATE__/$(date --rfc-2822)/" $changelog @@ -82,7 +84,7 @@ if [ "$BUILD" ]; then # remove the node from the version string SRCFILE="mercurial_$(echo $debver | sed "s,-$node,,").orig.tar.gz" - "$PWD/hg" archive $SRCFILE + "$ROOTDIR/hg" archive $SRCFILE mv $SRCFILE .. debuild -us -uc -i -I $DEBFLAGS if [ $? != 0 ]; then diff --git a/contrib/buildrpm b/contrib/packaging/buildrpm rename from contrib/buildrpm rename to contrib/packaging/buildrpm --- a/contrib/buildrpm +++ b/contrib/packaging/buildrpm @@ -35,9 +35,9 @@ while [ "$1" ]; do esac done -cd "`dirname $0`/.." +cd "`dirname $0`/../.." -specfile=$PWD/contrib/mercurial.spec +specfile=$PWD/contrib/packaging/mercurial.spec if [ ! -f $specfile ]; then echo "Cannot find $specfile!" 1>&2 exit 1 diff --git a/contrib/debian/cacerts.rc b/contrib/packaging/debian/cacerts.rc rename from contrib/debian/cacerts.rc rename to contrib/packaging/debian/cacerts.rc diff --git a/contrib/debian/changelog b/contrib/packaging/debian/changelog rename from contrib/debian/changelog rename to contrib/packaging/debian/changelog diff --git a/contrib/debian/compat b/contrib/packaging/debian/compat rename from contrib/debian/compat rename to contrib/packaging/debian/compat diff --git a/contrib/debian/control b/contrib/packaging/debian/control rename from contrib/debian/control rename to contrib/packaging/debian/control diff --git a/contrib/debian/copyright b/contrib/packaging/debian/copyright rename from contrib/debian/copyright rename to contrib/packaging/debian/copyright diff --git a/contrib/debian/default-tools.rc b/contrib/packaging/debian/default-tools.rc rename from contrib/debian/default-tools.rc rename to contrib/packaging/debian/default-tools.rc diff --git a/contrib/debian/hgkpath.rc b/contrib/packaging/debian/hgkpath.rc rename from contrib/debian/hgkpath.rc rename to contrib/packaging/debian/hgkpath.rc diff --git a/contrib/debian/rules b/contrib/packaging/debian/rules rename from contrib/debian/rules rename to contrib/packaging/debian/rules --- a/contrib/debian/rules +++ b/contrib/packaging/debian/rules @@ -35,7 +35,7 @@ override_dh_install: mkdir -p "$(CURDIR)"/debian/mercurial-common/usr/share/mercurial cp contrib/hgk "$(CURDIR)"/debian/mercurial-common/usr/share/mercurial mkdir -p "$(CURDIR)"/debian/mercurial-common/etc/mercurial/hgrc.d/ - cp contrib/debian/*.rc "$(CURDIR)"/debian/mercurial-common/etc/mercurial/hgrc.d/ + cp contrib/packaging/debian/*.rc "$(CURDIR)"/debian/mercurial-common/etc/mercurial/hgrc.d/ # completions mkdir -p "$(CURDIR)"/debian/mercurial-common/usr/share/bash-completion/completions cp contrib/bash_completion "$(CURDIR)"/debian/mercurial-common/usr/share/bash-completion/completions/hg diff --git a/contrib/docker/centos5 b/contrib/packaging/docker/centos5 rename from contrib/docker/centos5 rename to contrib/packaging/docker/centos5 --- a/contrib/docker/centos5 +++ b/contrib/packaging/docker/centos5 @@ -1,4 +1,8 @@ FROM centos:centos5 + +RUN groupadd -g 1000 build && \ + useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build + RUN \ sed -i 's/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo && \ sed -i 's/^#\(baseurl=\)http:\/\/mirror.centos.org\/centos/\1http:\/\/vault.centos.org/' /etc/yum.repos.d/*.repo && \ diff 
--git a/contrib/docker/centos6 b/contrib/packaging/docker/centos6 rename from contrib/docker/centos6 rename to contrib/packaging/docker/centos6 --- a/contrib/docker/centos6 +++ b/contrib/packaging/docker/centos6 @@ -1,4 +1,8 @@ FROM centos:centos6 + +RUN groupadd -g 1000 build && \ + useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build + RUN yum install -y \ gcc \ gettext \ diff --git a/contrib/docker/centos7 b/contrib/packaging/docker/centos7 rename from contrib/docker/centos7 rename to contrib/packaging/docker/centos7 --- a/contrib/docker/centos7 +++ b/contrib/packaging/docker/centos7 @@ -1,4 +1,8 @@ FROM centos:centos7 + +RUN groupadd -g 1000 build && \ + useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build + RUN yum install -y \ gcc \ gettext \ diff --git a/contrib/docker/debian.template b/contrib/packaging/docker/debian.template rename from contrib/docker/debian.template rename to contrib/packaging/docker/debian.template --- a/contrib/docker/debian.template +++ b/contrib/packaging/docker/debian.template @@ -1,4 +1,8 @@ -FROM debian:__CODENAME__ +FROM debian:%CODENAME% + +RUN groupadd -g 1000 build && \ + useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build + RUN apt-get update && apt-get install -y \ build-essential \ debhelper \ diff --git a/contrib/docker/fedora20 b/contrib/packaging/docker/fedora20 rename from contrib/docker/fedora20 rename to contrib/packaging/docker/fedora20 --- a/contrib/docker/fedora20 +++ b/contrib/packaging/docker/fedora20 @@ -1,4 +1,8 @@ FROM fedora:20 + +RUN groupadd -g 1000 build && \ + useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build + RUN yum install -y \ gcc \ gettext \ diff --git a/contrib/docker/fedora21 b/contrib/packaging/docker/fedora21 rename from contrib/docker/fedora21 rename to contrib/packaging/docker/fedora21 --- a/contrib/docker/fedora21 +++ b/contrib/packaging/docker/fedora21 @@ -1,4 +1,8 @@ FROM fedora:21 + +RUN groupadd -g 1000 build && \ + useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build + RUN yum install -y \ gcc \ gettext \ diff --git a/contrib/packaging/docker/fedora28 b/contrib/packaging/docker/fedora28 new file mode 100644 --- /dev/null +++ b/contrib/packaging/docker/fedora28 @@ -0,0 +1,15 @@ +FROM fedora:28 + +RUN groupadd -g 1000 build && \ + useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build + +RUN dnf install -y \ + gcc \ + gettext \ + make \ + python-devel \ + python-docutils \ + rpm-build + +# For creating repo meta data +RUN dnf install -y createrepo diff --git a/contrib/docker/ubuntu.template b/contrib/packaging/docker/ubuntu.template rename from contrib/docker/ubuntu.template rename to contrib/packaging/docker/ubuntu.template --- a/contrib/docker/ubuntu.template +++ b/contrib/packaging/docker/ubuntu.template @@ -1,4 +1,8 @@ -FROM ubuntu:__CODENAME__ +FROM ubuntu:%CODENAME% + +RUN groupadd -g 1000 build && \ + useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build + RUN apt-get update && apt-get install -y \ build-essential \ debhelper \ diff --git a/contrib/dockerdeb b/contrib/packaging/dockerdeb rename from contrib/dockerdeb rename to contrib/packaging/dockerdeb --- a/contrib/dockerdeb +++ b/contrib/packaging/dockerdeb @@ -1,12 +1,9 @@ #!/bin/bash -eu -. $(dirname $0)/dockerlib.sh . $(dirname $0)/packagelib.sh BUILDDIR=$(dirname $0) -export ROOTDIR=$(cd $BUILDDIR/.. > /dev/null; pwd) - -checkdocker +export ROOTDIR=$(cd $BUILDDIR/../.. 
> /dev/null; pwd)
 
 DISTID="$1"
 CODENAME="$2"
@@ -14,21 +11,29 @@
 PLATFORM="$1-$2"
 shift; shift # extra params are passed to build process
 
 OUTPUTDIR=${OUTPUTDIR:=$ROOTDIR/packages/$PLATFORM}
+CONTAINER=hg-docker-$PLATFORM
 
-initcontainer $PLATFORM
+DOCKER=$($BUILDDIR/hg-docker docker-path)
+
+$BUILDDIR/hg-docker build \
+    --build-arg CODENAME=$CODENAME \
+    $BUILDDIR/docker/$DISTID.template \
+    $CONTAINER
 
 # debuild only appears to be able to save built debs etc to .., so we
 # have to share the .. of the current directory with the docker
 # container and hope it's writable. Whee.
-dn=$(basename $PWD)
+dn=$(basename $ROOTDIR)
+
+DBUILDUSER=build
 
 if [ $(uname) = "Darwin" ] ; then
     $DOCKER run -u $DBUILDUSER --rm -v $PWD/..:/mnt $CONTAINER \
         sh -c "cd /mnt/$dn && make clean && make local"
 fi
-$DOCKER run -u $DBUILDUSER --rm -v $PWD/..:/mnt $CONTAINER \
-    sh -c "cd /mnt/$dn && DEB_BUILD_OPTIONS='${DEB_BUILD_OPTIONS:=}' contrib/builddeb --build --distid $DISTID --codename $CODENAME $@"
-contrib/builddeb --cleanup --distid $DISTID --codename $CODENAME
+$DOCKER run -u $DBUILDUSER --rm -v $ROOTDIR/..:/mnt $CONTAINER \
+    sh -c "cd /mnt/$dn && DEB_BUILD_OPTIONS='${DEB_BUILD_OPTIONS:=}' contrib/packaging/builddeb --build --distid $DISTID --codename $CODENAME $@"
+contrib/packaging/builddeb --cleanup --distid $DISTID --codename $CODENAME
 if [ $(uname) = "Darwin" ] ; then
     $DOCKER run -u $DBUILDUSER --rm -v $PWD/..:/mnt $CONTAINER \
         sh -c "cd /mnt/$dn && make clean"
diff --git a/contrib/dockerrpm b/contrib/packaging/dockerrpm
rename from contrib/dockerrpm
rename to contrib/packaging/dockerrpm
--- a/contrib/dockerrpm
+++ b/contrib/packaging/dockerrpm
@@ -1,21 +1,23 @@
 #!/bin/bash -e
 
-. $(dirname $0)/dockerlib.sh
-
 BUILDDIR=$(dirname $0)
-export ROOTDIR=$(cd $BUILDDIR/..; pwd)
-
-checkdocker
+export ROOTDIR=$(cd $BUILDDIR/../..; pwd)
 
 PLATFORM="$1"
 shift # extra params are passed to buildrpm
 
-initcontainer $PLATFORM
+DOCKER=$($BUILDDIR/hg-docker docker-path)
+
+CONTAINER=hg-docker-$PLATFORM
+
+$BUILDDIR/hg-docker build $BUILDDIR/docker/$PLATFORM $CONTAINER
 
 RPMBUILDDIR=$ROOTDIR/packages/$PLATFORM
-contrib/buildrpm --rpmbuilddir $RPMBUILDDIR --prepare $*
+$ROOTDIR/contrib/packaging/buildrpm --rpmbuilddir $RPMBUILDDIR --prepare $*
 
 DSHARED=/mnt/shared
+DBUILDUSER=build
+
 $DOCKER run -e http_proxy -e https_proxy -u $DBUILDUSER --rm -v $RPMBUILDDIR:$DSHARED $CONTAINER \
     rpmbuild --define "_topdir $DSHARED" -ba $DSHARED/SPECS/mercurial.spec --clean
diff --git a/contrib/packaging/hg-docker b/contrib/packaging/hg-docker
new file mode 100755
--- /dev/null
+++ b/contrib/packaging/hg-docker
@@ -0,0 +1,111 @@
+#!/usr/bin/env python3
+#
+# Copyright 2018 Gregory Szorc
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import argparse
+import pathlib
+import shutil
+import subprocess
+import sys
+
+def get_docker() -> str:
+    docker = shutil.which('docker.io') or shutil.which('docker')
+    if not docker:
+        print('could not find docker executable')
+        sys.exit(1)
+
+    try:
+        out = subprocess.check_output([docker, '-h'], stderr=subprocess.STDOUT)
+
+        if b'Jansens' in out:
+            print('%s is the Docking System Tray; try installing docker.io' %
+                  docker)
+            sys.exit(1)
+    except subprocess.CalledProcessError as e:
+        print('error calling `%s -h`: %s' % (docker, e.output))
+        sys.exit(1)
+
+    out = subprocess.check_output([docker, 'version'],
+                                  stderr=subprocess.STDOUT)
+
+    lines = out.splitlines()
+    if not any(l.startswith((b'Client:', b'Client version:')) for l in lines):
+        print('`%s version` does not look like Docker' % docker)
+        sys.exit(1)
+
+    if not any(l.startswith((b'Server:', b'Server version:')) for l in lines):
+        print('`%s version` does not look like Docker' % docker)
+        sys.exit(1)
+
+    return docker
+
+def get_dockerfile(path: pathlib.Path, args: list) -> bytes:
+    with path.open('rb') as fh:
+        df = fh.read()
+
+    for k, v in args:
+        df = df.replace(b'%%%s%%' % k, v)
+
+    return df
+
+def build_docker_image(dockerfile: pathlib.Path, params: list, tag: str):
+    """Build a Docker image from a templatized Dockerfile."""
+    docker = get_docker()
+
+    dockerfile_path = pathlib.Path(dockerfile)
+
+    dockerfile = get_dockerfile(dockerfile_path, params)
+
+    print('building Dockerfile:')
+    print(dockerfile.decode('utf-8', 'replace'))
+
+    args = [
+        docker,
+        'build',
+        '--build-arg', 'http_proxy',
+        '--build-arg', 'https_proxy',
+        '--tag', tag,
+        '-',
+    ]
+
+    print('executing: %r' % args)
+    subprocess.run(args, input=dockerfile, check=True)
+
+def command_build(args):
+    build_args = []
+    for arg in args.build_arg:
+        k, v = arg.split('=', 1)
+        build_args.append((k.encode('utf-8'), v.encode('utf-8')))
+
+    build_docker_image(pathlib.Path(args.dockerfile),
+                       build_args,
+                       args.tag)
+
+def command_docker(args):
+    print(get_docker())
+
+def main() -> int:
+    parser = argparse.ArgumentParser()
+
+    subparsers = parser.add_subparsers(title='subcommands')
+
+    build = subparsers.add_parser('build', help='Build a Docker image')
+    build.set_defaults(func=command_build)
+    build.add_argument('--build-arg', action='append', default=[],
+                       help='Substitution to perform in Dockerfile; '
+                            'format: key=value')
+    build.add_argument('dockerfile', help='path to Dockerfile to use')
+    build.add_argument('tag', help='Tag to apply to created image')
+
+    docker = subparsers.add_parser('docker-path', help='Resolve path to Docker')
+    docker.set_defaults(func=command_docker)
+
+    args = parser.parse_args()
+
+    return args.func(args)
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/contrib/linux-wheel-centos5-blacklist b/contrib/packaging/linux-wheel-centos5-blacklist
rename from contrib/linux-wheel-centos5-blacklist
rename to contrib/packaging/linux-wheel-centos5-blacklist
diff --git a/contrib/macosx/Readme.html b/contrib/packaging/macosx/Readme.html
rename from contrib/macosx/Readme.html
rename to contrib/packaging/macosx/Readme.html
diff --git a/contrib/macosx/Welcome.html b/contrib/packaging/macosx/Welcome.html
rename from contrib/macosx/Welcome.html
rename to contrib/packaging/macosx/Welcome.html
diff --git a/contrib/macosx/distribution.xml b/contrib/packaging/macosx/distribution.xml
rename from contrib/macosx/distribution.xml
rename to contrib/packaging/macosx/distribution.xml
--- a/contrib/macosx/distribution.xml
+++
b/contrib/packaging/macosx/distribution.xml @@ -4,7 +4,7 @@ org.mercurial-scm - + (?:.*)D(?P[1-9][0-9]*))$', re.M) + b'^Differential Revision:\s*(?P(?:.*)D(?P[1-9][0-9]*))$', re.M) def getoldnodedrevmap(repo, nodelist): """find previous nodes that has been sent to Phabricator @@ -254,16 +287,16 @@ def getoldnodedrevmap(repo, nodelist): # Check commit message m = _differentialrevisiondescre.search(ctx.description()) if m: - toconfirm[node] = (1, set(precnodes), int(m.group('id'))) + toconfirm[node] = (1, set(precnodes), int(m.group(b'id'))) # Double check if tags are genuine by collecting all old nodes from # Phabricator, and expect precursors overlap with it. if toconfirm: drevs = [drev for force, precs, drev in toconfirm.values()] - alldiffs = callconduit(unfi, 'differential.querydiffs', - {'revisionIDs': drevs}) + alldiffs = callconduit(unfi, b'differential.querydiffs', + {b'revisionIDs': drevs}) getnode = lambda d: bin(encoding.unitolocal( - getdiffmeta(d).get(r'node', ''))) or None + getdiffmeta(d).get(r'node', b''))) or None for newnode, (force, precset, drev) in toconfirm.items(): diffs = [d for d in alldiffs.values() if int(d[r'revisionID']) == drev] @@ -274,11 +307,11 @@ def getoldnodedrevmap(repo, nodelist): # Ignore if precursors (Phabricator and local repo) do not overlap, # and force is not set (when commit message says nothing) if not force and not bool(phprecset & precset): - tagname = 'D%d' % drev + tagname = b'D%d' % drev tags.tag(repo, tagname, nullid, message=None, user=None, date=None, local=True) - unfi.ui.warn(_('D%s: local tag removed - does not match ' - 'Differential history\n') % drev) + unfi.ui.warn(_(b'D%s: local tag removed - does not match ' + b'Differential history\n') % drev) continue # Find the last node using Phabricator metadata, and make sure it @@ -307,40 +340,40 @@ def creatediff(ctx): repo = ctx.repo() repophid = getrepophid(repo) # Create a "Differential Diff" via "differential.createrawdiff" API - params = {'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))} + params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))} if repophid: - params['repositoryPHID'] = repophid - diff = callconduit(repo, 'differential.createrawdiff', params) + params[b'repositoryPHID'] = repophid + diff = callconduit(repo, b'differential.createrawdiff', params) if not diff: - raise error.Abort(_('cannot create diff for %s') % ctx) + raise error.Abort(_(b'cannot create diff for %s') % ctx) return diff def writediffproperties(ctx, diff): """write metadata to diff so patches could be applied losslessly""" params = { - 'diff_id': diff[r'id'], - 'name': 'hg:meta', - 'data': json.dumps({ - 'user': ctx.user(), - 'date': '%d %d' % ctx.date(), - 'node': ctx.hex(), - 'parent': ctx.p1().hex(), + b'diff_id': diff[r'id'], + b'name': b'hg:meta', + b'data': json.dumps({ + b'user': ctx.user(), + b'date': b'%d %d' % ctx.date(), + b'node': ctx.hex(), + b'parent': ctx.p1().hex(), }), } - callconduit(ctx.repo(), 'differential.setdiffproperty', params) + callconduit(ctx.repo(), b'differential.setdiffproperty', params) params = { - 'diff_id': diff[r'id'], - 'name': 'local:commits', - 'data': json.dumps({ + b'diff_id': diff[r'id'], + b'name': b'local:commits', + b'data': json.dumps({ ctx.hex(): { - 'author': stringutil.person(ctx.user()), - 'authorEmail': stringutil.email(ctx.user()), - 'time': ctx.date()[0], + b'author': stringutil.person(ctx.user()), + b'authorEmail': stringutil.email(ctx.user()), + b'time': ctx.date()[0], }, }), } - callconduit(ctx.repo(), 
'differential.setdiffproperty', params) + callconduit(ctx.repo(), b'differential.setdiffproperty', params) def createdifferentialrevision(ctx, revid=None, parentrevid=None, oldnode=None, olddiff=None, actions=None): @@ -365,7 +398,7 @@ def createdifferentialrevision(ctx, revi transactions = [] if neednewdiff: diff = creatediff(ctx) - transactions.append({'type': 'update', 'value': diff[r'phid']}) + transactions.append({b'type': b'update', b'value': diff[r'phid']}) else: # Even if we don't need to upload a new diff because the patch content # does not change. We might still need to update its metadata so @@ -379,52 +412,52 @@ def createdifferentialrevision(ctx, revi # existing revision (revid is not None) since that introduces visible # churns (someone edited "Summary" twice) on the web page. if parentrevid and revid is None: - summary = 'Depends on D%s' % parentrevid - transactions += [{'type': 'summary', 'value': summary}, - {'type': 'summary', 'value': ' '}] + summary = b'Depends on D%s' % parentrevid + transactions += [{b'type': b'summary', b'value': summary}, + {b'type': b'summary', b'value': b' '}] if actions: transactions += actions # Parse commit message and update related fields. desc = ctx.description() - info = callconduit(repo, 'differential.parsecommitmessage', - {'corpus': desc}) + info = callconduit(repo, b'differential.parsecommitmessage', + {b'corpus': desc}) for k, v in info[r'fields'].items(): - if k in ['title', 'summary', 'testPlan']: - transactions.append({'type': k, 'value': v}) + if k in [b'title', b'summary', b'testPlan']: + transactions.append({b'type': k, b'value': v}) - params = {'transactions': transactions} + params = {b'transactions': transactions} if revid is not None: # Update an existing Differential Revision - params['objectIdentifier'] = revid + params[b'objectIdentifier'] = revid - revision = callconduit(repo, 'differential.revision.edit', params) + revision = callconduit(repo, b'differential.revision.edit', params) if not revision: - raise error.Abort(_('cannot create revision for %s') % ctx) + raise error.Abort(_(b'cannot create revision for %s') % ctx) return revision, diff def userphids(repo, names): """convert user names to PHIDs""" - query = {'constraints': {'usernames': names}} - result = callconduit(repo, 'user.search', query) + query = {b'constraints': {b'usernames': names}} + result = callconduit(repo, b'user.search', query) # username not found is not an error of the API. So check if we have missed # some names here. 
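Collected from the hunks above, the payload that createdifferentialrevision() hands to the differential.revision.edit conduit call has roughly this shape (a sketch only; the PHID and revision id values are invented for illustration):

    transactions = [
        {b'type': b'update', b'value': b'PHID-DIFF-xxxxxx'},   # attach the diff
        {b'type': b'summary', b'value': b'Depends on D122'},   # stack dependency
        {b'type': b'title', b'value': b'commit title'},        # parsed message fields
    ]
    params = {b'transactions': transactions}
    params[b'objectIdentifier'] = 123  # only when updating an existing revision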
data = result[r'data'] resolved = set(entry[r'fields'][r'username'] for entry in data) unresolved = set(names) - resolved if unresolved: - raise error.Abort(_('unknown username: %s') - % ' '.join(sorted(unresolved))) + raise error.Abort(_(b'unknown username: %s') + % b' '.join(sorted(unresolved))) return [entry[r'phid'] for entry in data] -@command('phabsend', - [('r', 'rev', [], _('revisions to send'), _('REV')), - ('', 'amend', True, _('update commit messages')), - ('', 'reviewer', [], _('specify reviewers')), - ('', 'confirm', None, _('ask for confirmation before sending'))], - _('REV [OPTIONS]')) +@command(b'phabsend', + [(b'r', b'rev', [], _(b'revisions to send'), _(b'REV')), + (b'', b'amend', True, _(b'update commit messages')), + (b'', b'reviewer', [], _(b'specify reviewers')), + (b'', b'confirm', None, _(b'ask for confirmation before sending'))], + _(b'REV [OPTIONS]')) def phabsend(ui, repo, *revs, **opts): """upload changesets to Phabricator @@ -452,29 +485,29 @@ def phabsend(ui, repo, *revs, **opts): phabsend will check obsstore and the above association to decide whether to update an existing Differential Revision, or create a new one. """ - revs = list(revs) + opts.get('rev', []) + revs = list(revs) + opts.get(b'rev', []) revs = scmutil.revrange(repo, revs) if not revs: - raise error.Abort(_('phabsend requires at least one changeset')) - if opts.get('amend'): + raise error.Abort(_(b'phabsend requires at least one changeset')) + if opts.get(b'amend'): cmdutil.checkunfinished(repo) # {newnode: (oldnode, olddiff, olddrev} oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs]) - confirm = ui.configbool('phabsend', 'confirm') - confirm |= bool(opts.get('confirm')) + confirm = ui.configbool(b'phabsend', b'confirm') + confirm |= bool(opts.get(b'confirm')) if confirm: confirmed = _confirmbeforesend(repo, revs, oldmap) if not confirmed: - raise error.Abort(_('phabsend cancelled')) + raise error.Abort(_(b'phabsend cancelled')) actions = [] - reviewers = opts.get('reviewer', []) + reviewers = opts.get(b'reviewer', []) if reviewers: phids = userphids(repo, reviewers) - actions.append({'type': 'reviewers.add', 'value': phids}) + actions.append({b'type': b'reviewers.add', b'value': phids}) drevids = [] # [int] diffmap = {} # {newnode: diff} @@ -483,54 +516,54 @@ def phabsend(ui, repo, *revs, **opts): # can provide dependency relationship lastrevid = None for rev in revs: - ui.debug('sending rev %d\n' % rev) + ui.debug(b'sending rev %d\n' % rev) ctx = repo[rev] # Get Differential Revision ID oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None)) - if oldnode != ctx.node() or opts.get('amend'): + if oldnode != ctx.node() or opts.get(b'amend'): # Create or update Differential Revision revision, diff = createdifferentialrevision( ctx, revid, lastrevid, oldnode, olddiff, actions) diffmap[ctx.node()] = diff newrevid = int(revision[r'object'][r'id']) if revid: - action = 'updated' + action = b'updated' else: - action = 'created' + action = b'created' # Create a local tag to note the association, if commit message # does not have it already m = _differentialrevisiondescre.search(ctx.description()) - if not m or int(m.group('id')) != newrevid: - tagname = 'D%d' % newrevid + if not m or int(m.group(b'id')) != newrevid: + tagname = b'D%d' % newrevid tags.tag(repo, tagname, ctx.node(), message=None, user=None, date=None, local=True) else: # Nothing changed. But still set "newrevid" so the next revision # could depend on this one. 
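The tag association logic above hinges on _differentialrevisiondescre matching the "Differential Revision:" footer in commit messages. A standalone sketch of that match (the named groups <url> and <id> correspond to the m.group() calls used throughout this file; note that on Python 3 group names are passed as str even for bytes patterns):

    import re

    descre = re.compile(
        br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M)

    msg = (b'widget: frob the blobs\n\n'
           b'Differential Revision: https://phab.example.invalid/D1234')
    m = descre.search(msg)
    assert m.group('url') == b'https://phab.example.invalid/D1234'
    assert m.group('id') == b'1234'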
newrevid = revid - action = 'skipped' + action = b'skipped' actiondesc = ui.label( - {'created': _('created'), - 'skipped': _('skipped'), - 'updated': _('updated')}[action], - 'phabricator.action.%s' % action) - drevdesc = ui.label('D%s' % newrevid, 'phabricator.drev') - nodedesc = ui.label(bytes(ctx), 'phabricator.node') - desc = ui.label(ctx.description().split('\n')[0], 'phabricator.desc') - ui.write(_('%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc, - desc)) + {b'created': _(b'created'), + b'skipped': _(b'skipped'), + b'updated': _(b'updated')}[action], + b'phabricator.action.%s' % action) + drevdesc = ui.label(b'D%s' % newrevid, b'phabricator.drev') + nodedesc = ui.label(bytes(ctx), b'phabricator.node') + desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc') + ui.write(_(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc, + desc)) drevids.append(newrevid) lastrevid = newrevid # Update commit messages and remove tags - if opts.get('amend'): + if opts.get(b'amend'): unfi = repo.unfiltered() - drevs = callconduit(repo, 'differential.query', {'ids': drevids}) - with repo.wlock(), repo.lock(), repo.transaction('phabsend'): - wnode = unfi['.'].node() + drevs = callconduit(repo, b'differential.query', {b'ids': drevids}) + with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'): + wnode = unfi[b'.'].node() mapping = {} # {oldnode: [newnode]} for i, rev in enumerate(revs): old = unfi[rev] @@ -546,23 +579,25 @@ def phabsend(ui, repo, *revs, **opts): new = context.metadataonlyctx( repo, old, parents=parents, text=newdesc, user=old.user(), date=old.date(), extra=old.extra()) + newnode = new.commit() + mapping[old.node()] = [newnode] # Update diff property writediffproperties(unfi[newnode], diffmap[old.node()]) # Remove local tags since it's no longer necessary - tagname = 'D%d' % drevid + tagname = b'D%d' % drevid if tagname in repo.tags(): tags.tag(repo, tagname, nullid, message=None, user=None, date=None, local=True) - scmutil.cleanupnodes(repo, mapping, 'phabsend') + scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True) if wnode in mapping: unfi.setparents(mapping[wnode][0]) # Map from "hg:meta" keys to header understood by "hg import". The order is # consistent with "hg export" output. -_metanamemap = util.sortdict([(r'user', 'User'), (r'date', 'Date'), - (r'node', 'Node ID'), (r'parent', 'Parent ')]) +_metanamemap = util.sortdict([(r'user', b'User'), (r'date', b'Date'), + (r'node', b'Node ID'), (r'parent', b'Parent ')]) def _confirmbeforesend(repo, revs, oldmap): url, token = readurltoken(repo) @@ -572,68 +607,69 @@ def _confirmbeforesend(repo, revs, oldma desc = ctx.description().splitlines()[0] oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None)) if drevid: - drevdesc = ui.label('D%s' % drevid, 'phabricator.drev') + drevdesc = ui.label(b'D%s' % drevid, b'phabricator.drev') else: - drevdesc = ui.label(_('NEW'), 'phabricator.drev') + drevdesc = ui.label(_(b'NEW'), b'phabricator.drev') - ui.write(_('%s - %s: %s\n') % (drevdesc, - ui.label(bytes(ctx), 'phabricator.node'), - ui.label(desc, 'phabricator.desc'))) + ui.write(_(b'%s - %s: %s\n') + % (drevdesc, + ui.label(bytes(ctx), b'phabricator.node'), + ui.label(desc, b'phabricator.desc'))) - if ui.promptchoice(_('Send the above changes to %s (yn)?' - '$$ &Yes $$ &No') % url): + if ui.promptchoice(_(b'Send the above changes to %s (yn)?' 
+ b'$$ &Yes $$ &No') % url): return False return True -_knownstatusnames = {'accepted', 'needsreview', 'needsrevision', 'closed', - 'abandoned'} +_knownstatusnames = {b'accepted', b'needsreview', b'needsrevision', b'closed', + b'abandoned'} def _getstatusname(drev): """get normalized status name from a Differential Revision""" - return drev[r'statusName'].replace(' ', '').lower() + return drev[r'statusName'].replace(b' ', b'').lower() # Small language to specify differential revisions. Support symbols: (), :X, # +, and -. _elements = { # token-type: binding-strength, primary, prefix, infix, suffix - '(': (12, None, ('group', 1, ')'), None, None), - ':': (8, None, ('ancestors', 8), None, None), - '&': (5, None, None, ('and_', 5), None), - '+': (4, None, None, ('add', 4), None), - '-': (4, None, None, ('sub', 4), None), - ')': (0, None, None, None, None), - 'symbol': (0, 'symbol', None, None, None), - 'end': (0, None, None, None, None), + b'(': (12, None, (b'group', 1, b')'), None, None), + b':': (8, None, (b'ancestors', 8), None, None), + b'&': (5, None, None, (b'and_', 5), None), + b'+': (4, None, None, (b'add', 4), None), + b'-': (4, None, None, (b'sub', 4), None), + b')': (0, None, None, None, None), + b'symbol': (0, b'symbol', None, None, None), + b'end': (0, None, None, None, None), } def _tokenize(text): view = memoryview(text) # zero-copy slice - special = '():+-& ' + special = b'():+-& ' pos = 0 length = len(text) while pos < length: - symbol = ''.join(itertools.takewhile(lambda ch: ch not in special, - view[pos:])) + symbol = b''.join(itertools.takewhile(lambda ch: ch not in special, + view[pos:])) if symbol: - yield ('symbol', symbol, pos) + yield (b'symbol', symbol, pos) pos += len(symbol) else: # special char, ignore space - if text[pos] != ' ': + if text[pos] != b' ': yield (text[pos], None, pos) pos += 1 - yield ('end', None, pos) + yield (b'end', None, pos) def _parse(text): tree, pos = parser.parser(_elements).parse(_tokenize(text)) if pos != len(text): - raise error.ParseError('invalid token', pos) + raise error.ParseError(b'invalid token', pos) return tree def _parsedrev(symbol): """str -> int or None, ex. 
'D45' -> 45; '12' -> 12; 'x' -> None""" - if symbol.startswith('D') and symbol[1:].isdigit(): + if symbol.startswith(b'D') and symbol[1:].isdigit(): return int(symbol[1:]) if symbol.isdigit(): return int(symbol) @@ -643,11 +679,11 @@ def _prefetchdrevs(tree): drevs = set() ancestordrevs = set() op = tree[0] - if op == 'symbol': + if op == b'symbol': r = _parsedrev(tree[1]) if r: drevs.add(r) - elif op == 'ancestors': + elif op == b'ancestors': r, a = _prefetchdrevs(tree[1]) drevs.update(r) ancestordrevs.update(r) @@ -706,13 +742,14 @@ def querydrev(repo, spec): key = (params.get(r'ids') or params.get(r'phids') or [None])[0] if key in prefetched: return prefetched[key] - drevs = callconduit(repo, 'differential.query', params) + drevs = callconduit(repo, b'differential.query', params) # Fill prefetched with the result for drev in drevs: prefetched[drev[r'phid']] = drev prefetched[int(drev[r'id'])] = drev if key not in prefetched: - raise error.Abort(_('cannot get Differential Revision %r') % params) + raise error.Abort(_(b'cannot get Differential Revision %r') + % params) return prefetched[key] def getstack(topdrevids): @@ -730,7 +767,7 @@ def querydrev(repo, spec): auxiliary = drev.get(r'auxiliary', {}) depends = auxiliary.get(r'phabricator:depends-on', []) for phid in depends: - queue.append({'phids': [phid]}) + queue.append({b'phids': [phid]}) result.reverse() return smartset.baseset(result) @@ -741,7 +778,7 @@ def querydrev(repo, spec): drevs, ancestordrevs = _prefetchdrevs(tree) # developer config: phabricator.batchsize - batchsize = repo.ui.configint('phabricator', 'batchsize', 12) + batchsize = repo.ui.configint(b'phabricator', b'batchsize') # Prefetch Differential Revisions in batch tofetch = set(drevs) @@ -754,7 +791,7 @@ def querydrev(repo, spec): # Walk through the tree, return smartsets def walk(tree): op = tree[0] - if op == 'symbol': + if op == b'symbol': drev = _parsedrev(tree[1]) if drev: return smartset.baseset([drev]) @@ -763,16 +800,16 @@ def querydrev(repo, spec): if _getstatusname(prefetched[r]) == tree[1]] return smartset.baseset(drevs) else: - raise error.Abort(_('unknown symbol: %s') % tree[1]) - elif op in {'and_', 'add', 'sub'}: + raise error.Abort(_(b'unknown symbol: %s') % tree[1]) + elif op in {b'and_', b'add', b'sub'}: assert len(tree) == 3 return getattr(operator, op)(walk(tree[1]), walk(tree[2])) - elif op == 'group': + elif op == b'group': return walk(tree[1]) - elif op == 'ancestors': + elif op == b'ancestors': return getstack(walk(tree[1])) else: - raise error.ProgrammingError('illegal tree: %r' % tree) + raise error.ProgrammingError(b'illegal tree: %r' % tree) return [prefetched[r] for r in walk(tree)] @@ -786,9 +823,9 @@ def getdescfromdrev(drev): summary = drev[r'summary'].rstrip() testplan = drev[r'testPlan'].rstrip() if testplan: - testplan = 'Test Plan:\n%s' % testplan - uri = 'Differential Revision: %s' % drev[r'uri'] - return '\n\n'.join(filter(None, [title, summary, testplan, uri])) + testplan = b'Test Plan:\n%s' % testplan + uri = b'Differential Revision: %s' % drev[r'uri'] + return b'\n\n'.join(filter(None, [title, summary, testplan, uri])) def getdiffmeta(diff): """get commit metadata (date, node, user, p1) from a diff object @@ -848,16 +885,17 @@ def readpatch(repo, drevs, write): """ # Prefetch hg:meta property for all diffs diffids = sorted(set(max(int(v) for v in drev[r'diffs']) for drev in drevs)) - diffs = callconduit(repo, 'differential.querydiffs', {'ids': diffids}) + diffs = callconduit(repo, b'differential.querydiffs', {b'ids': 
diffids}) # Generate patch for each drev for drev in drevs: - repo.ui.note(_('reading D%s\n') % drev[r'id']) + repo.ui.note(_(b'reading D%s\n') % drev[r'id']) diffid = max(int(v) for v in drev[r'diffs']) - body = callconduit(repo, 'differential.getrawdiff', {'diffID': diffid}) + body = callconduit(repo, b'differential.getrawdiff', + {b'diffID': diffid}) desc = getdescfromdrev(drev) - header = '# HG changeset patch\n' + header = b'# HG changeset patch\n' # Try to preserve metadata from hg:meta property. Write hg patch # headers that can be read by the "import" command. See patchheadermap @@ -865,14 +903,14 @@ def readpatch(repo, drevs, write): meta = getdiffmeta(diffs[str(diffid)]) for k in _metanamemap.keys(): if k in meta: - header += '# %s %s\n' % (_metanamemap[k], meta[k]) + header += b'# %s %s\n' % (_metanamemap[k], meta[k]) - content = '%s%s\n%s' % (header, desc, body) + content = b'%s%s\n%s' % (header, desc, body) write(encoding.unitolocal(content)) -@command('phabread', - [('', 'stack', False, _('read dependencies'))], - _('DREVSPEC [OPTIONS]')) +@command(b'phabread', + [(b'', b'stack', False, _(b'read dependencies'))], + _(b'DREVSPEC [OPTIONS]')) def phabread(ui, repo, spec, **opts): """print patches from Phabricator suitable for importing @@ -892,51 +930,51 @@ def phabread(ui, repo, spec, **opts): If --stack is given, follow dependencies information and read all patches. It is equivalent to the ``:`` operator. """ - if opts.get('stack'): - spec = ':(%s)' % spec + if opts.get(b'stack'): + spec = b':(%s)' % spec drevs = querydrev(repo, spec) readpatch(repo, drevs, ui.write) -@command('phabupdate', - [('', 'accept', False, _('accept revisions')), - ('', 'reject', False, _('reject revisions')), - ('', 'abandon', False, _('abandon revisions')), - ('', 'reclaim', False, _('reclaim revisions')), - ('m', 'comment', '', _('comment on the last revision')), - ], _('DREVSPEC [OPTIONS]')) +@command(b'phabupdate', + [(b'', b'accept', False, _(b'accept revisions')), + (b'', b'reject', False, _(b'reject revisions')), + (b'', b'abandon', False, _(b'abandon revisions')), + (b'', b'reclaim', False, _(b'reclaim revisions')), + (b'm', b'comment', b'', _(b'comment on the last revision')), + ], _(b'DREVSPEC [OPTIONS]')) def phabupdate(ui, repo, spec, **opts): """update Differential Revision in batch DREVSPEC selects revisions. See :hg:`help phabread` for its usage. 
""" - flags = [n for n in 'accept reject abandon reclaim'.split() if opts.get(n)] + flags = [n for n in b'accept reject abandon reclaim'.split() if opts.get(n)] if len(flags) > 1: - raise error.Abort(_('%s cannot be used together') % ', '.join(flags)) + raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags)) actions = [] for f in flags: - actions.append({'type': f, 'value': 'true'}) + actions.append({b'type': f, b'value': b'true'}) drevs = querydrev(repo, spec) for i, drev in enumerate(drevs): - if i + 1 == len(drevs) and opts.get('comment'): - actions.append({'type': 'comment', 'value': opts['comment']}) + if i + 1 == len(drevs) and opts.get(b'comment'): + actions.append({b'type': b'comment', b'value': opts[b'comment']}) if actions: - params = {'objectIdentifier': drev[r'phid'], - 'transactions': actions} - callconduit(repo, 'differential.revision.edit', params) + params = {b'objectIdentifier': drev[r'phid'], + b'transactions': actions} + callconduit(repo, b'differential.revision.edit', params) templatekeyword = registrar.templatekeyword() -@templatekeyword('phabreview', requires={'ctx'}) +@templatekeyword(b'phabreview', requires={b'ctx'}) def template_review(context, mapping): """:phabreview: Object describing the review for this changeset. Has attributes `url` and `id`. """ - ctx = context.resource(mapping, 'ctx') + ctx = context.resource(mapping, b'ctx') m = _differentialrevisiondescre.search(ctx.description()) if m: return { - 'url': m.group('url'), - 'id': "D{}".format(m.group('id')), + b'url': m.group(b'url'), + b'id': b"D{}".format(m.group(b'id')), } diff --git a/contrib/python3-whitelist b/contrib/python3-whitelist --- a/contrib/python3-whitelist +++ b/contrib/python3-whitelist @@ -2,6 +2,7 @@ test-abort-checkin.t test-add.t test-addremove-similar.t test-addremove.t +test-alias.t test-amend-subrepo.t test-amend.t test-ancestor.py @@ -14,6 +15,7 @@ test-audit-subrepo.t test-automv.t test-backout.t test-backwards-remove.t +test-bad-pull.t test-basic.t test-bheads.t test-bisect.t @@ -22,6 +24,7 @@ test-bisect3.t test-blackbox.t test-bookmarks-current.t test-bookmarks-merge.t +test-bookmarks-pushpull.t test-bookmarks-rebase.t test-bookmarks-strip.t test-bookmarks.t @@ -30,16 +33,24 @@ test-branch-option.t test-branch-tag-confict.t test-branches.t test-bundle-phases.t +test-bundle-r.t test-bundle-type.t test-bundle-vs-outgoing.t +test-bundle.t +test-bundle2-exchange.t +test-bundle2-format.t test-bundle2-multiple-changegroups.t +test-bundle2-pushback.t +test-bundle2-remote-changegroup.t test-cappedreader.py test-casecollision.t test-cat.t +test-cbor.py test-censor.t test-changelog-exec.t test-check-commit.t test-check-execute.t +test-check-interfaces.py test-check-module-imports.t test-check-pyflakes.t test-check-pylint.t @@ -49,7 +60,7 @@ test-clone-cgi.t test-clone-pull-corruption.t test-clone-r.t test-clone-update-order.t -test-command-template.t +test-clonebundles.t test-commit-amend.t test-commit-interactive.t test-commit-multiple.t @@ -61,10 +72,16 @@ test-config-env.py test-config.t test-conflict.t test-confused-revert.t +test-context.py test-contrib-check-code.t test-contrib-check-commit.t test-convert-authormap.t test-convert-clonebranches.t +test-convert-cvs-branch.t +test-convert-cvs-detectmerge.t +test-convert-cvs-synthetic.t +test-convert-cvs.t +test-convert-cvsnt-mergepoints.t test-convert-datesort.t test-convert-filemap.t test-convert-hg-sink.t @@ -81,6 +98,7 @@ test-debugextensions.t test-debugindexdot.t test-debugrename.t test-default-push.t 
+test-diff-antipatience.t test-diff-binary-file.t test-diff-change.t test-diff-copy-depth.t @@ -99,6 +117,7 @@ test-directaccess.t test-dirstate-backup.t test-dirstate-nonnormalset.t test-dirstate.t +test-dispatch.py test-doctest.py test-double-merge.t test-drawdag.t @@ -114,8 +133,11 @@ test-encoding.t test-eol-add.t test-eol-clone.t test-eol-hook.t +test-eol-patch.t test-eol-tag.t test-eol-update.t +test-eol.t +test-eolfilename.t test-excessive-merge.t test-exchange-obsmarkers-case-A1.t test-exchange-obsmarkers-case-A2.t @@ -143,9 +165,15 @@ test-execute-bit.t test-export.t test-extdata.t test-extdiff.t +test-extensions-afterloaded.t +test-extensions-wrapfunction.py test-extra-filelog-entry.t +test-fetch.t test-filebranch.t +test-filecache.py +test-filelog.py test-fileset-generated.t +test-fileset.t test-fix-topology.t test-flags.t test-generaldelta.t @@ -158,10 +186,12 @@ test-hg-parseurl.py test-hghave.t test-hgignore.t test-hgk.t +test-hgrc.t test-hgweb-bundle.t test-hgweb-descend-empties.t test-hgweb-empty.t test-hgweb-removed.t +test-hgwebdir-paths.py test-hgwebdirsym.t test-histedit-arguments.t test-histedit-base.t @@ -171,6 +201,7 @@ test-histedit-drop.t test-histedit-edit.t test-histedit-fold-non-commute.t test-histedit-fold.t +test-histedit-no-backup.t test-histedit-no-change.t test-histedit-non-commute-abort.t test-histedit-non-commute.t @@ -181,12 +212,18 @@ test-http-branchmap.t test-http-bundle1.t test-http-clone-r.t test-http.t +test-hybridencode.py test-identify.t +test-impexp-branch.t +test-import-bypass.t +test-import-eol.t +test-import-merge.t test-import-unknown.t test-import.t test-imports-checker.t test-incoming-outgoing.t test-inherit-mode.t +test-init.t test-issue1089.t test-issue1102.t test-issue1175.t @@ -209,12 +246,14 @@ test-issue842.t test-journal-exists.t test-journal-share.t test-journal.t +test-known.t test-largefiles-cache.t test-largefiles-misc.t test-largefiles-small-disk.t test-largefiles-update.t test-largefiles.t test-lfs-largefiles.t +test-lfs-pointer.py test-linerange.py test-locate.t test-lock-badness.t @@ -254,6 +293,8 @@ test-merge6.t test-merge7.t test-merge8.t test-merge9.t +test-minifileset.py +test-minirst.py test-mq-git.t test-mq-header-date.t test-mq-header-from.t @@ -298,8 +339,11 @@ test-narrow-shallow-merges.t test-narrow-shallow.t test-narrow-strip.t test-narrow-update.t +test-narrow-widen.t +test-narrow.t test-nested-repo.t test-newbranch.t +test-nointerrupt.t test-obshistory.t test-obsmarker-template.t test-obsmarkers-effectflag.t @@ -307,10 +351,16 @@ test-obsolete-bundle-strip.t test-obsolete-changeset-exchange.t test-obsolete-checkheads.t test-obsolete-distributed.t +test-obsolete-divergent.t test-obsolete-tag-cache.t +test-pager.t test-parents.t +test-parseindex2.py +test-patch-offset.t +test-patch.t test-pathconflicts-merge.t test-pathconflicts-update.t +test-pathencode.py test-pending.t test-permissions.t test-phases.t @@ -320,6 +370,7 @@ test-pull-permission.t test-pull-pull-corruption.t test-pull-r.t test-pull-update.t +test-pull.t test-purge.t test-push-checkheads-partial-C1.t test-push-checkheads-partial-C2.t @@ -350,7 +401,9 @@ test-push-checkheads-unpushed-D6.t test-push-checkheads-unpushed-D7.t test-push-http.t test-push-warn.t +test-push.t test-pushvars.t +test-qrecord.t test-rebase-abort.t test-rebase-base-flag.t test-rebase-bookmarks.t @@ -378,9 +431,11 @@ test-rebase-rename.t test-rebase-scenario-global.t test-rebase-templates.t test-rebase-transaction.t +test-rebuildstate.t test-record.t test-relink.t test-remove.t 
+test-removeemptydirs.t test-rename-after-merge.t test-rename-dir-merge.t test-rename-merge1.t @@ -389,11 +444,14 @@ test-repair-strip.t test-repo-compengines.t test-resolve.t test-revert-flags.t +test-revert-interactive.t test-revert-unknown.t test-revlog-ancestry.py test-revlog-group-emptyiter.t test-revlog-mmapindex.t test-revlog-packentry.t +test-revlog-raw.py +test-revlog-v2.t test-revset-dirstate-parents.t test-revset-legacy-lookup.t test-revset-outgoing.t @@ -409,34 +467,56 @@ test-show-stack.t test-show-work.t test-show.t test-simple-update.t +test-simplekeyvaluefile.py +test-simplemerge.py test-single-head.t test-sparse-clear.t +test-sparse-clone.t test-sparse-import.t test-sparse-merges.t test-sparse-profiles.t test-sparse-requirement.t test-sparse-verbose-json.t +test-sparse.t +test-split.t +test-ssh-bundle1.t test-ssh-clone-r.t +test-ssh-proto-unbundle.t test-ssh-proto.t +test-ssh.t test-sshserver.py test-stack.t +test-status-inprocess.py test-status-rev.t test-status-terse.t +test-strict.t test-strip-cross.t test-strip.t test-subrepo-deep-nested-change.t test-subrepo-missing.t +test-subrepo-paths.t test-subrepo-recursion.t test-subrepo-relative-path.t test-subrepo.t +test-symlink-os-yes-fs-no.py +test-symlink-placeholder.t test-symlinks.t test-tag.t test-tags.t -test-template-engine.t +test-template-basic.t +test-template-functions.t +test-template-keywords.t +test-template-map.t +test-transplant.t test-treemanifest.t +test-ui-color.py +test-ui-config.py +test-ui-verbosity.py test-unamend.t +test-unbundlehash.t test-uncommit.t test-unified-test.t +test-unionrepo.t test-unrelated-pull.t test-up-local-change.t test-update-branches.t @@ -447,11 +527,16 @@ test-update-reverse.t test-upgrade-repo.t test-url-download.t test-url-rev.t +test-url.py test-username-newline.t test-verify.t +test-walk.t +test-walkrepo.py test-websub.t test-win32text.t test-wireproto-clientreactor.py test-wireproto-framing.py test-wireproto-serverreactor.py +test-wireproto.py +test-wsgirequest.py test-xdg.t diff --git a/contrib/synthrepo.py b/contrib/synthrepo.py --- a/contrib/synthrepo.py +++ b/contrib/synthrepo.py @@ -54,13 +54,16 @@ from mercurial.node import ( ) from mercurial import ( context, + diffutil, error, hg, patch, registrar, scmutil, ) -from mercurial.utils import dateutil +from mercurial.utils import ( + dateutil, +) # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for # extensions which SHIP WITH MERCURIAL. 
Non-mainline extensions should @@ -172,13 +175,10 @@ def analyze(ui, repo, *revs, **opts): revs = scmutil.revrange(repo, revs) revs.sort() - progress = ui.progress - _analyzing = _('analyzing') - _changesets = _('changesets') - _total = len(revs) - + progress = ui.makeprogress(_('analyzing'), unit=_('changesets'), + total=len(revs)) for i, rev in enumerate(revs): - progress(_analyzing, i, unit=_changesets, total=_total) + progress.update(i) ctx = repo[rev] pl = ctx.parents() pctx = pl[0] @@ -196,7 +196,9 @@ def analyze(ui, repo, *revs, **opts): if lastctx.rev() != nullrev: timedelta = ctx.date()[0] - lastctx.date()[0] interarrival[roundto(timedelta, 300)] += 1 - diff = sum((d.splitlines() for d in ctx.diff(pctx, git=True)), []) + diffopts = diffutil.diffallopts(ui, {'git': True}) + diff = sum((d.splitlines() + for d in ctx.diff(pctx, opts=diffopts)), []) fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0 for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff): if isbin: @@ -222,6 +224,7 @@ def analyze(ui, repo, *revs, **opts): filesadded[fileadds] += 1 dirsadded[diradds] += 1 filesremoved[fileremoves] += 1 + progress.complete() invchildren = zerodict() @@ -338,7 +341,6 @@ def synthesize(ui, repo, descpath, **opt nevertouch = {'.hgsub', '.hgignore', '.hgtags'} - progress = ui.progress _synthesizing = _('synthesizing') _files = _('initial files') _changesets = _('changesets') @@ -362,8 +364,9 @@ def synthesize(ui, repo, descpath, **opt path = os.path.dirname(path) return True + progress = ui.makeprogress(_synthesizing, unit=_files, total=initcount) for i in xrange(0, initcount): - ui.progress(_synthesizing, i, unit=_files, total=initcount) + progress.update(i) path = pickpath() while not validpath(path): @@ -378,7 +381,7 @@ def synthesize(ui, repo, descpath, **opt def filectxfn(repo, memctx, path): return context.memfilectx(repo, memctx, path, files[path]) - ui.progress(_synthesizing, None) + progress.complete() message = 'synthesized wide repo with %d files' % (len(files),) mc = context.memctx(repo, [pctx.node(), nullid], message, files, filectxfn, ui.username(), @@ -394,8 +397,9 @@ def synthesize(ui, repo, descpath, **opt # Synthesize incremental revisions to the repository, adding repo depth. count = int(opts['count']) heads = set(map(repo.changelog.rev, repo.heads())) + progress = ui.makeprogress(_synthesizing, unit=_changesets, total=count) for i in xrange(count): - progress(_synthesizing, i, unit=_changesets, total=count) + progress.update(i) node = repo.changelog.node revs = len(repo) @@ -485,6 +489,7 @@ def synthesize(ui, repo, descpath, **opt heads.add(repo.changelog.rev(newnode)) heads.discard(r1) heads.discard(r2) + progress.complete() lock.release() wlock.release() diff --git a/contrib/wix/help.wxs b/contrib/wix/help.wxs --- a/contrib/wix/help.wxs +++ b/contrib/wix/help.wxs @@ -19,6 +19,7 @@ + diff --git a/hgdemandimport/__init__.py b/hgdemandimport/__init__.py --- a/hgdemandimport/__init__.py +++ b/hgdemandimport/__init__.py @@ -21,8 +21,9 @@ if sys.version_info[0] >= 3: else: from . import demandimportpy2 as demandimport -# Extensions can add to this list if necessary. -ignore = [ +# Full module names which can't be lazy imported. +# Extensions can add to this set. 
+IGNORES = { '__future__', '_hashlib', # ImportError during pkg_resources/__init__.py:fixup_namespace_package '__builtin__', 'builtins', 'urwid.command_map', # for pudb - ] +} _pypy = '__pypy__' in sys.builtin_module_names if _pypy: - ignore.extend([ - # _ctypes.pointer is shadowed by "from ... import pointer" (PyPy 5) - '_ctypes.pointer', - ]) + # _ctypes.pointer is shadowed by "from ... import pointer" (PyPy 5) + IGNORES.add('_ctypes.pointer') -demandimport.init(ignore) +demandimport.init(IGNORES) # Re-export. isenabled = demandimport.isenabled diff --git a/hgdemandimport/demandimportpy2.py b/hgdemandimport/demandimportpy2.py --- a/hgdemandimport/demandimportpy2.py +++ b/hgdemandimport/demandimportpy2.py @@ -162,7 +162,7 @@ class _demandmod(object): _pypy = '__pypy__' in sys.builtin_module_names def _demandimport(name, globals=None, locals=None, fromlist=None, level=-1): - if locals is None or name in ignore or fromlist == ('*',): + if locals is None or name in ignores or fromlist == ('*',): # these cases we can't really delay return _hgextimport(_origimport, name, globals, locals, fromlist, level) elif not fromlist: @@ -209,7 +209,7 @@ def _demandimport(name, globals=None, lo # while processing the import statement. return mn = '%s.%s' % (mod.__name__, attr) - if mn in ignore: + if mn in ignores: importfunc = _origimport else: importfunc = _demandmod @@ -273,11 +273,11 @@ def _demandimport(name, globals=None, lo return mod -ignore = [] +ignores = set() -def init(ignorelist): - global ignore - ignore = ignorelist +def init(ignoreset): + global ignores + ignores = ignoreset def isenabled(): return builtins.__import__ == _demandimport diff --git a/hgdemandimport/demandimportpy3.py b/hgdemandimport/demandimportpy3.py --- a/hgdemandimport/demandimportpy3.py +++ b/hgdemandimport/demandimportpy3.py @@ -40,7 +40,7 @@ class _lazyloaderex(importlib.util.LazyL """ def exec_module(self, module): """Make the module load lazily.""" - if _deactivated or module.__name__ in ignore: + if _deactivated or module.__name__ in ignores: self.loader.exec_module(module) else: super().exec_module(module) @@ -62,11 +62,11 @@ def _makefinder(path): (_bytecode_loader, importlib.machinery.BYTECODE_SUFFIXES), ) -ignore = [] +ignores = set() -def init(ignorelist): - global ignore - ignore = ignorelist +def init(ignoreset): + global ignores + ignores = ignoreset def isenabled(): return _makefinder in sys.path_hooks and not _deactivated diff --git a/hgext/acl.py b/hgext/acl.py --- a/hgext/acl.py +++ b/hgext/acl.py @@ -57,6 +57,28 @@ access control. Keys in these sections a a glob syntax by default). The corresponding values follow the same syntax as the other sections above. +Bookmark-based Access Control +----------------------------- +Use the ``acl.deny.bookmarks`` and ``acl.allow.bookmarks`` sections to +have bookmark-based access control. Keys in these sections can be +either: + +- a bookmark name, or +- an asterisk, to match any bookmark; + +The corresponding values can be either: + +- a comma-separated list containing users and groups, or +- an asterisk, to match anyone; + +You can add the "!" prefix to a user or group name to invert the sense +of the match. + +Note: for interactions between clients and servers using Mercurial 3.6+, +a rejection will generally reject the entire push. For interactions +involving older clients, the commit transactions will already have been +accepted, and only the bookmark movement will be rejected.
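For example, under the rules documented above, a configuration sketch like the following (the ``release-managers`` group and the ``stable`` bookmark are hypothetical) would reject any attempt to move ``stable`` by a user outside that group:

    [acl.deny.bookmarks]
    # "!" inverts the match: deny every user who is NOT in the
    # release-managers group.
    stable = !@release-managers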
+ Groups ------ @@ -326,9 +348,10 @@ def hook(ui, repo, hooktype, node=None, ensureenabled(ui) - if hooktype not in ['pretxnchangegroup', 'pretxncommit']: - raise error.Abort(_('config error - hook type "%s" cannot stop ' - 'incoming changesets nor commits') % hooktype) + if hooktype not in ['pretxnchangegroup', 'pretxncommit', 'prepushkey']: + raise error.Abort( + _('config error - hook type "%s" cannot stop ' + 'incoming changesets, commits, nor bookmarks') % hooktype) if (hooktype == 'pretxnchangegroup' and source not in ui.configlist('acl', 'sources')): ui.debug('acl: changes have source "%s" - skipping\n' % source) @@ -345,6 +368,30 @@ def hook(ui, repo, hooktype, node=None, ui.debug('acl: checking access for user "%s"\n' % user) + if hooktype == 'prepushkey': + _pkhook(ui, repo, hooktype, node, source, user, **kwargs) + else: + _txnhook(ui, repo, hooktype, node, source, user, **kwargs) + +def _pkhook(ui, repo, hooktype, node, source, user, **kwargs): + if kwargs['namespace'] == 'bookmarks': + bookmark = kwargs['key'] + ctx = kwargs['new'] + allowbookmarks = buildmatch(ui, None, user, 'acl.allow.bookmarks') + denybookmarks = buildmatch(ui, None, user, 'acl.deny.bookmarks') + + if denybookmarks and denybookmarks(bookmark): + raise error.Abort(_('acl: user "%s" denied on bookmark "%s"' + ' (changeset "%s")') + % (user, bookmark, ctx)) + if allowbookmarks and not allowbookmarks(bookmark): + raise error.Abort(_('acl: user "%s" not allowed on bookmark "%s"' + ' (changeset "%s")') + % (user, bookmark, ctx)) + ui.debug('acl: bookmark access granted: "%s" on bookmark "%s"\n' + % (ctx, bookmark)) + +def _txnhook(ui, repo, hooktype, node, source, user, **kwargs): # deprecated config: acl.config cfg = ui.config('acl', 'config') if cfg: diff --git a/hgext/beautifygraph.py b/hgext/beautifygraph.py new file mode 100644 --- /dev/null +++ b/hgext/beautifygraph.py @@ -0,0 +1,93 @@ +# -*- coding: UTF-8 -*- +# beautifygraph.py - improve graph output by using Unicode characters +# +# Copyright 2018 John Stiles +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +'''beautify log -G output by using Unicode characters (EXPERIMENTAL) + + A terminal with UTF-8 support and monospace narrow text are required. +''' + +from __future__ import absolute_import + +from mercurial.i18n import _ +from mercurial import ( + encoding, + extensions, + graphmod, + templatekw, +) + +# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for +# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should +# be specifying the version(s) of Mercurial they are tested with, or +# leave the attribute unspecified. 
+testedwith = 'ships-with-hg-core' + +def prettyedge(before, edge, after): + if edge == '~': + return '\xE2\x95\xA7' # U+2567 ╧ + if edge == 'X': + return '\xE2\x95\xB3' # U+2573 ╳ + if edge == '/': + return '\xE2\x95\xB1' # U+2571 ╱ + if edge == '-': + return '\xE2\x94\x80' # U+2500 ─ + if edge == '|': + return '\xE2\x94\x82' # U+2502 │ + if edge == ':': + return '\xE2\x94\x86' # U+2506 ┆ + if edge == '\\': + return '\xE2\x95\xB2' # U+2572 ╲ + if edge == '+': + if before == ' ' and not after == ' ': + return '\xE2\x94\x9C' # U+251C ├ + if after == ' ' and not before == ' ': + return '\xE2\x94\xA4' # U+2524 ┤ + return '\xE2\x94\xBC' # U+253C ┼ + return edge + +def convertedges(line): + line = ' %s ' % line + pretty = [] + for idx in xrange(len(line) - 2): + pretty.append(prettyedge(line[idx], line[idx + 1], line[idx + 2])) + return ''.join(pretty) + +def getprettygraphnode(orig, *args, **kwargs): + node = orig(*args, **kwargs) + if node == 'o': + return '\xE2\x97\x8B' # U+25CB ○ + if node == '@': + return '\xE2\x97\x8D' # U+25CD ◍ + if node == '*': + return '\xE2\x88\x97' # U+2217 ∗ + if node == 'x': + return '\xE2\x97\x8C' # U+25CC ◌ + if node == '_': + return '\xE2\x95\xA4' # U+2564 ╤ + return node + +def outputprettygraph(orig, ui, graph, *args, **kwargs): + (edges, text) = zip(*graph) + graph = zip([convertedges(e) for e in edges], text) + return orig(ui, graph, *args, **kwargs) + +def extsetup(ui): + if encoding.encoding != 'UTF-8': + ui.warn(_('beautifygraph: unsupported encoding, UTF-8 required\n')) + return + + if 'A' in encoding._wide: + ui.warn(_('beautifygraph: unsupported terminal settings, ' + 'monospace narrow text required\n')) + return + + if ui.plain('graph'): + return + + extensions.wrapfunction(graphmod, 'outputgraph', outputprettygraph) + extensions.wrapfunction(templatekw, 'getgraphnode', getprettygraphnode) diff --git a/hgext/censor.py b/hgext/censor.py --- a/hgext/censor.py +++ b/hgext/censor.py @@ -32,7 +32,6 @@ from mercurial.node import short from mercurial import ( error, - lock as lockmod, registrar, revlog, scmutil, @@ -52,13 +51,8 @@ testedwith = 'ships-with-hg-core' ('t', 'tombstone', '', _('replacement tombstone data'), _('TEXT'))], _('-r REV [-t TEXT] [FILE]')) def censor(ui, repo, path, rev='', tombstone='', **opts): - wlock = lock = None - try: - wlock = repo.wlock() - lock = repo.lock() + with repo.wlock(), repo.lock(): return _docensor(ui, repo, path, rev, tombstone, **opts) - finally: - lockmod.release(lock, wlock) def _docensor(ui, repo, path, rev='', tombstone='', **opts): if not path: diff --git a/hgext/churn.py b/hgext/churn.py --- a/hgext/churn.py +++ b/hgext/churn.py @@ -52,7 +52,7 @@ def countrate(ui, repo, amap, *pats, **o def getkey(ctx): t, tz = ctx.date() date = datetime.datetime(*time.gmtime(float(t) - tz)[:6]) - return date.strftime(opts['dateformat']) + return date.strftime(encoding.strfromlocal(opts['dateformat'])) else: tmpl = opts.get('oldtemplate') or opts.get('template') tmpl = logcmdutil.maketemplater(ui, repo, tmpl) @@ -61,7 +61,8 @@ def countrate(ui, repo, amap, *pats, **o tmpl.show(ctx) return ui.popbuffer() - state = {'count': 0} + progress = ui.makeprogress(_('analyzing'), unit=_('revisions'), + total=len(repo)) rate = {} df = False if opts.get('date'): @@ -87,14 +88,12 @@ def countrate(ui, repo, amap, *pats, **o lines = changedlines(ui, repo, ctx1, ctx, fns) rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)] - state['count'] += 1 - ui.progress(_('analyzing'), state['count'], total=len(repo), - 
unit=_('revisions')) + progress.increment() for ctx in cmdutil.walkchangerevs(repo, m, opts, prep): continue - ui.progress(_('analyzing'), None) + progress.complete() return rate @@ -161,7 +160,7 @@ def churn(ui, repo, *pats, **opts): if not aliases and os.path.exists(repo.wjoin('.hgchurn')): aliases = repo.wjoin('.hgchurn') if aliases: - for l in open(aliases, "r"): + for l in open(aliases, "rb"): try: alias, actual = l.rsplit('=' in l and '=' or None, 1) amap[alias.strip()] = actual.strip() diff --git a/hgext/convert/__init__.py b/hgext/convert/__init__.py --- a/hgext/convert/__init__.py +++ b/hgext/convert/__init__.py @@ -204,6 +204,14 @@ def convert(ui, src, dest=None, revmapfi :convert.hg.revs: revset specifying the source revisions to convert. + Bazaar Source + ############# + + The following options can be used with ``--config``: + + :convert.bzr.saverev: whether to store the original Bazaar commit ID in + the metadata of the destination commit. The default is True. + CVS Source ########## diff --git a/hgext/convert/bzr.py b/hgext/convert/bzr.py --- a/hgext/convert/bzr.py +++ b/hgext/convert/bzr.py @@ -19,7 +19,7 @@ from mercurial import ( from . import common # these do not work with demandimport, blacklist -demandimport.ignore.extend([ +demandimport.IGNORES.update([ 'bzrlib.transactions', 'bzrlib.urlutils', 'ElementPath', @@ -65,6 +65,7 @@ class bzr_source(common.converter_source raise common.NoRepo(_('%s does not look like a Bazaar repository') % path) self._parentids = {} + self._saverev = ui.configbool('convert', 'bzr.saverev') def _checkrepotype(self, path): # Lightweight checkouts detection is informational but probably @@ -175,7 +176,8 @@ class bzr_source(common.converter_source author=self.recode(rev.committer), desc=self.recode(rev.message), branch=branch, - rev=version) + rev=version, + saverev=self._saverev) def gettags(self): bytetags = {} diff --git a/hgext/convert/common.py b/hgext/convert/common.py --- a/hgext/convert/common.py +++ b/hgext/convert/common.py @@ -214,7 +214,7 @@ class converter_source(object): if not encoding: encoding = self.encoding or 'utf-8' - if isinstance(s, unicode): + if isinstance(s, pycompat.unicode): return s.encode("utf-8") try: return s.decode(pycompat.sysstr(encoding)).encode("utf-8") diff --git a/hgext/convert/convcmd.py b/hgext/convert/convcmd.py --- a/hgext/convert/convcmd.py +++ b/hgext/convert/convcmd.py @@ -55,7 +55,7 @@ svn_source = subversion.svn_source orig_encoding = 'ascii' def recode(s): - if isinstance(s, unicode): + if isinstance(s, pycompat.unicode): return s.encode(pycompat.sysstr(orig_encoding), 'replace') else: return s.decode('utf-8').encode( @@ -123,7 +123,7 @@ def convertsource(ui, path, type, revs): exceptions.append(inst) if not ui.quiet: for inst in exceptions: - ui.write("%s\n" % inst) + ui.write("%s\n" % pycompat.bytestr(inst)) raise error.Abort(_('%s: missing or unsupported repository') % path) def convertsink(ui, path, type): @@ -143,13 +143,11 @@ class progresssource(object): def __init__(self, ui, source, filecount): self.ui = ui self.source = source - self.filecount = filecount - self.retrieved = 0 + self.progress = ui.makeprogress(_('getting files'), unit=_('files'), + total=filecount) def getfile(self, file, rev): - self.retrieved += 1 - self.ui.progress(_('getting files'), self.retrieved, - item=file, total=self.filecount, unit=_('files')) + self.progress.increment(item=file) return self.source.getfile(file, rev) def targetfilebelongstosource(self, targetfilename): @@ -159,7 +157,7 @@ class 
progresssource(object): return self.source.lookuprev(rev) def close(self): - self.ui.progress(_('getting files'), None) + self.progress.complete() class converter(object): def __init__(self, ui, source, dest, revmapfile, opts): @@ -234,10 +232,12 @@ class converter(object): def walktree(self, heads): '''Return a mapping that identifies the uncommitted parents of every uncommitted changeset.''' - visit = heads + visit = list(heads) known = set() parents = {} numcommits = self.source.numcommits() + progress = self.ui.makeprogress(_('scanning'), unit=_('revisions'), + total=numcommits) while visit: n = visit.pop(0) if n in known: @@ -247,14 +247,13 @@ class converter(object): if m == SKIPREV or self.dest.hascommitfrommap(m): continue known.add(n) - self.ui.progress(_('scanning'), len(known), unit=_('revisions'), - total=numcommits) + progress.update(len(known)) commit = self.cachecommit(n) parents[n] = [] for p in commit.parents: parents[n].append(p) visit.append(p) - self.ui.progress(_('scanning'), None) + progress.complete() return parents @@ -510,6 +509,8 @@ class converter(object): c = None self.ui.status(_("converting...\n")) + progress = self.ui.makeprogress(_('converting'), + unit=_('revisions'), total=len(t)) for i, c in enumerate(t): num -= 1 desc = self.commitcache[c].desc @@ -520,10 +521,9 @@ class converter(object): # uses is 'utf-8' self.ui.status("%d %s\n" % (num, recode(desc))) self.ui.note(_("source: %s\n") % recode(c)) - self.ui.progress(_('converting'), i, unit=_('revisions'), - total=len(t)) + progress.update(i) self.copy(c) - self.ui.progress(_('converting'), None) + progress.complete() if not self.ui.configbool('convert', 'skiptags'): tags = self.source.gettags() diff --git a/hgext/convert/cvsps.py b/hgext/convert/cvsps.py --- a/hgext/convert/cvsps.py +++ b/hgext/convert/cvsps.py @@ -6,6 +6,7 @@ # GNU General Public License version 2 or any later version. from __future__ import absolute_import +import functools import os import re @@ -50,8 +51,8 @@ class logentry(object): self.__dict__.update(entries) def __repr__(self): - items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__)) - return "%s(%s)"%(type(self).__name__, ", ".join(items)) + items = (r"%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__)) + return r"%s(%s)"%(type(self).__name__, r", ".join(items)) class logerror(Exception): pass @@ -110,25 +111,25 @@ def createlog(ui, directory=None, root=" log = [] # list of logentry objects containing the CVS state # patterns to match in CVS (r)log output, by state of use - re_00 = re.compile('RCS file: (.+)$') - re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$') - re_02 = re.compile('cvs (r?log|server): (.+)\n$') - re_03 = re.compile("(Cannot access.+CVSROOT)|" - "(can't create temporary directory.+)$") - re_10 = re.compile('Working file: (.+)$') - re_20 = re.compile('symbolic names:') - re_30 = re.compile('\t(.+): ([\\d.]+)$') - re_31 = re.compile('----------------------------$') - re_32 = re.compile('=======================================' - '======================================$') - re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$') - re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);' - r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?' - r'(\s+commitid:\s+([^;]+);)?' 
- r'(.*mergepoint:\s+([^;]+);)?') - re_70 = re.compile('branches: (.+);$') + re_00 = re.compile(b'RCS file: (.+)$') + re_01 = re.compile(b'cvs \\[r?log aborted\\]: (.+)$') + re_02 = re.compile(b'cvs (r?log|server): (.+)\n$') + re_03 = re.compile(b"(Cannot access.+CVSROOT)|" + b"(can't create temporary directory.+)$") + re_10 = re.compile(b'Working file: (.+)$') + re_20 = re.compile(b'symbolic names:') + re_30 = re.compile(b'\t(.+): ([\\d.]+)$') + re_31 = re.compile(b'----------------------------$') + re_32 = re.compile(b'=======================================' + b'======================================$') + re_50 = re.compile(b'revision ([\\d.]+)(\s+locked by:\s+.+;)?$') + re_60 = re.compile(br'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);' + br'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?' + br'(\s+commitid:\s+([^;]+);)?' + br'(.*mergepoint:\s+([^;]+);)?') + re_70 = re.compile(b'branches: (.+);$') - file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch') + file_added_re = re.compile(br'file [^/]+ was (initially )?added on branch') prefix = '' # leading path to strip of what we get from CVS @@ -509,7 +510,8 @@ def createlog(ui, directory=None, root=" comment = entry.comment for e in encodings: try: - entry.comment = comment.decode(e).encode('utf-8') + entry.comment = comment.decode( + pycompat.sysstr(e)).encode('utf-8') if ui.debugflag: ui.debug("transcoding by %s: %s of %s\n" % (e, revstr(entry.revision), entry.file)) @@ -565,11 +567,15 @@ def createchangeset(ui, log, fuzz=60, me mindate = {} for e in log: if e.commitid: - mindate[e.commitid] = min(e.date, mindate.get(e.commitid)) + if e.commitid not in mindate: + mindate[e.commitid] = e.date + else: + mindate[e.commitid] = min(e.date, mindate[e.commitid]) # Merge changesets - log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment, - x.author, x.branch, x.date, x.branchpoints)) + log.sort(key=lambda x: (mindate.get(x.commitid, (-1, 0)), + x.commitid or '', x.comment, + x.author, x.branch or '', x.date, x.branchpoints)) changesets = [] files = set() @@ -653,7 +659,7 @@ def createchangeset(ui, log, fuzz=60, me return 0 for c in changesets: - c.entries.sort(entitycompare) + c.entries.sort(key=functools.cmp_to_key(entitycompare)) # Sort changesets by date @@ -706,7 +712,7 @@ def createchangeset(ui, log, fuzz=60, me d = c(len(l.branchpoints), len(r.branchpoints)) return d - changesets.sort(cscmp) + changesets.sort(key=functools.cmp_to_key(cscmp)) # Collect tags @@ -729,12 +735,12 @@ def createchangeset(ui, log, fuzz=60, me # {{mergefrombranch BRANCHNAME}} by setting two parents. 
if mergeto is None: - mergeto = r'{{mergetobranch ([-\w]+)}}' + mergeto = br'{{mergetobranch ([-\w]+)}}' if mergeto: mergeto = re.compile(mergeto) if mergefrom is None: - mergefrom = r'{{mergefrombranch ([-\w]+)}}' + mergefrom = br'{{mergefrombranch ([-\w]+)}}' if mergefrom: mergefrom = re.compile(mergefrom) @@ -797,7 +803,7 @@ def createchangeset(ui, log, fuzz=60, me except KeyError: ui.warn(_("warning: CVS commit message references " "non-existent branch %r:\n%s\n") - % (m, c.comment)) + % (pycompat.bytestr(m), c.comment)) if m in branches and c.branch != m and not candidate.synthetic: c.parents.append(candidate) @@ -940,7 +946,8 @@ def debugcvsps(ui, *args, **opts): if fn.startswith(opts["prefix"]): fn = fn[len(opts["prefix"]):] ui.write('\t%s:%s->%s%s \n' % ( - fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL', + fn, + '.'.join([b"%d" % x for x in f.parent]) or 'INITIAL', '.'.join([(b"%d" % x) for x in f.revision]), ['', '(DEAD)'][f.dead])) ui.write('\n') diff --git a/hgext/convert/darcs.py b/hgext/convert/darcs.py --- a/hgext/convert/darcs.py +++ b/hgext/convert/darcs.py @@ -10,10 +10,11 @@ import errno import os import re import shutil -import tempfile + from mercurial.i18n import _ from mercurial import ( error, + pycompat, util, ) from mercurial.utils import dateutil @@ -76,7 +77,7 @@ class darcs_source(common.converter_sour self.ui.warn(_('failed to detect repository format!')) def before(self): - self.tmppath = tempfile.mkdtemp( + self.tmppath = pycompat.mkdtemp( prefix='convert-' + os.path.basename(self.path) + '-') output, status = self.run('init', repodir=self.tmppath) self.checkexit(status) @@ -103,7 +104,7 @@ class darcs_source(common.converter_sour shutil.rmtree(self.tmppath, ignore_errors=True) def recode(self, s, encoding=None): - if isinstance(s, unicode): + if isinstance(s, pycompat.unicode): # XMLParser returns unicode objects for anything it can't # encode into ASCII. We convert them back to str to get # recode's normal conversion behavior. 
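The cvsps.py sorts above trade Python 2's comparator-style ``list.sort(cmp)`` for key functions built with ``functools.cmp_to_key``, which behaves identically on Python 2.7 and 3. A minimal, self-contained sketch of the idiom (illustrative values only, not Mercurial code):

    import functools

    def compare(a, b):
        # Old-style three-way comparator, as entitycompare/cscmp are:
        # negative if a < b, zero if equal, positive if a > b.
        return (a > b) - (a < b)

    items = [3, 1, 2]
    items.sort(key=functools.cmp_to_key(compare))  # py2: items.sort(compare)
    assert items == [1, 2, 3]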
@@ -125,8 +126,7 @@ class darcs_source(common.converter_sour return etree.getroot() def format(self): - output, status = self.run('show', 'repo', no_files=True, - repodir=self.path) + output, status = self.run('show', 'repo', repodir=self.path) self.checkexit(status) m = re.search(r'^\s*Format:\s*(.*)$', output, re.MULTILINE) if not m: diff --git a/hgext/convert/subversion.py b/hgext/convert/subversion.py --- a/hgext/convert/subversion.py +++ b/hgext/convert/subversion.py @@ -5,7 +5,6 @@ from __future__ import absolute_import import os import re -import tempfile import xml.dom.minidom from mercurial.i18n import _ @@ -751,9 +750,10 @@ class svn_source(converter_source): self.module = new_module self.reparent(self.module) + progress = self.ui.makeprogress(_('scanning paths'), unit=_('paths'), + total=len(paths)) for i, (path, ent) in enumerate(paths): - self.ui.progress(_('scanning paths'), i, item=path, - total=len(paths), unit=_('paths')) + progress.update(i, item=path) entrypath = self.getrelpath(path) kind = self._checkpath(entrypath, revnum) @@ -839,7 +839,7 @@ class svn_source(converter_source): copytopath = self.getrelpath(copytopath) copies[self.recode(copytopath)] = self.recode(childpath) - self.ui.progress(_('scanning paths'), None) + progress.complete() changed.update(removed) return (list(changed), removed, copies) @@ -1081,7 +1081,7 @@ class svn_source(converter_source): ' hg executable is in PATH')) return logstream(stdout) -pre_revprop_change = '''#!/bin/sh +pre_revprop_change = b'''#!/bin/sh REPOS="$1" REV="$2" @@ -1098,8 +1098,8 @@ exit 1 ''' class svn_sink(converter_sink, commandline): - commit_re = re.compile(r'Committed revision (\d+).', re.M) - uuid_re = re.compile(r'Repository UUID:\s*(\S+)', re.M) + commit_re = re.compile(br'Committed revision (\d+).', re.M) + uuid_re = re.compile(br'Repository UUID:\s*(\S+)', re.M) def prerun(self): if self.wc: @@ -1225,7 +1225,7 @@ class svn_sink(converter_sink, commandli wdest = self.wjoin(dest) exists = os.path.lexists(wdest) if exists: - fd, tempname = tempfile.mkstemp( + fd, tempname = pycompat.mkstemp( prefix='hg-copy-', dir=os.path.dirname(wdest)) os.close(fd) os.unlink(tempname) @@ -1313,7 +1313,7 @@ class svn_sink(converter_sink, commandli self.xargs(self.setexec, 'propset', 'svn:executable', '*') self.setexec = [] - fd, messagefile = tempfile.mkstemp(prefix='hg-convert-') + fd, messagefile = pycompat.mkstemp(prefix='hg-convert-') fp = os.fdopen(fd, r'wb') fp.write(util.tonativeeol(commit.desc)) fp.close() diff --git a/hgext/eol.py b/hgext/eol.py --- a/hgext/eol.py +++ b/hgext/eol.py @@ -142,7 +142,7 @@ def tolf(s, params, ui, **kwargs): if ui.configbool('eol', 'only-consistent') and inconsistenteol(s): return s if (ui.configbool('eol', 'fix-trailing-newline') - and s and s[-1] != '\n'): + and s and not s.endswith('\n')): s = s + '\n' return util.tolf(s) @@ -153,7 +153,7 @@ def tocrlf(s, params, ui, **kwargs): if ui.configbool('eol', 'only-consistent') and inconsistenteol(s): return s if (ui.configbool('eol', 'fix-trailing-newline') - and s and s[-1] != '\n'): + and s and not s.endswith('\n')): s = s + '\n' return util.tocrlf(s) diff --git a/hgext/extdiff.py b/hgext/extdiff.py --- a/hgext/extdiff.py +++ b/hgext/extdiff.py @@ -71,7 +71,7 @@ import os import re import shutil import stat -import tempfile + from mercurial.i18n import _ from mercurial.node import ( nullid, @@ -210,7 +210,7 @@ def dodiff(ui, repo, cmdline, pats, opts if not common: return 0 - tmproot = tempfile.mkdtemp(prefix='extdiff.') + tmproot = 
pycompat.mkdtemp(prefix='extdiff.') try: if not opts.get('patch'): # Always make a copy of node1a (and node1b, if applicable) diff --git a/hgext/fix.py b/hgext/fix.py --- a/hgext/fix.py +++ b/hgext/fix.py @@ -70,6 +70,7 @@ from mercurial import ( registrar, scmutil, util, + worker, ) # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for @@ -133,32 +134,56 @@ def fix(ui, repo, *pats, **opts): raise error.Abort(_('cannot specify both "--rev" and "--all"')) opts['rev'] = ['not public() and not obsolete()'] opts['working_dir'] = True - with repo.wlock(), repo.lock(): + with repo.wlock(), repo.lock(), repo.transaction('fix'): revstofix = getrevstofix(ui, repo, opts) basectxs = getbasectxs(repo, opts, revstofix) workqueue, numitems = getworkqueue(ui, repo, pats, opts, revstofix, basectxs) + fixers = getfixers(ui) + + # There are no data dependencies between the workers fixing each file + # revision, so we can use all available parallelism. + def getfixes(items): + for rev, path in items: + ctx = repo[rev] + olddata = ctx[path].data() + newdata = fixfile(ui, opts, fixers, ctx, path, basectxs[rev]) + # Don't waste memory/time passing unchanged content back, but + # produce one result per item either way. + yield (rev, path, newdata if newdata != olddata else None) + results = worker.worker(ui, 1.0, getfixes, tuple(), workqueue) + + # We have to hold on to the data for each successor revision in memory + # until all its parents are committed. We ensure this by committing and + # freeing memory for the revisions in some topological order. This + # leaves a little bit of memory efficiency on the table, but also makes + # the tests deterministic. It might also be considered a feature since + # it makes the results more easily reproducible. filedata = collections.defaultdict(dict) replacements = {} - fixers = getfixers(ui) - # Some day this loop can become a worker pool, but for now it's easier - # to fix everything serially in topological order. - for rev, path in sorted(workqueue): - ctx = repo[rev] - olddata = ctx[path].data() - newdata = fixfile(ui, opts, fixers, ctx, path, basectxs[rev]) - if newdata != olddata: - filedata[rev][path] = newdata - numitems[rev] -= 1 - if not numitems[rev]: - if rev == wdirrev: - writeworkingdir(repo, ctx, filedata[rev], replacements) - else: - replacerev(ui, repo, ctx, filedata[rev], replacements) - del filedata[rev] + commitorder = sorted(revstofix, reverse=True) + with ui.makeprogress(topic=_('fixing'), unit=_('files'), + total=sum(numitems.values())) as progress: + for rev, path, newdata in results: + progress.increment(item=path) + if newdata is not None: + filedata[rev][path] = newdata + numitems[rev] -= 1 + # Apply the fixes for this and any other revisions that are + # ready and sitting at the front of the queue. Using a loop here + # prevents the queue from being blocked by the first revision to + # be ready out of order. 
+ while commitorder and not numitems[commitorder[-1]]: + rev = commitorder.pop() + ctx = repo[rev] + if rev == wdirrev: + writeworkingdir(repo, ctx, filedata[rev], replacements) + else: + replacerev(ui, repo, ctx, filedata[rev], replacements) + del filedata[rev] replacements = {prec: [succ] for prec, succ in replacements.iteritems()} - scmutil.cleanupnodes(repo, replacements, 'fix') + scmutil.cleanupnodes(repo, replacements, 'fix', fixphase=True) def getworkqueue(ui, repo, pats, opts, revstofix, basectxs): """"Constructs the list of files to be fixed at specific revisions @@ -168,11 +193,19 @@ def getworkqueue(ui, repo, pats, opts, r topological order. Each work item represents a file in the working copy or in some revision that should be fixed and written back to the working copy or into a replacement revision. + + Work items for the same revision are grouped together, so that a worker + pool starting with the first N items in parallel is likely to finish the + first revision's work before other revisions. This can allow us to write + the result to disk and reduce memory footprint. At time of writing, the + partition strategy in worker.py seems favorable to this. We also sort the + items by ascending revision number to match the order in which we commit + the fixes later. """ workqueue = [] numitems = collections.defaultdict(int) maxfilesize = ui.configbytes('fix', 'maxfilesize') - for rev in revstofix: + for rev in sorted(revstofix): fixctx = repo[rev] match = scmutil.match(fixctx, pats, opts) for path in pathstofix(ui, repo, pats, opts, match, basectxs[rev], @@ -352,7 +385,9 @@ def getbasectxs(repo, opts, revstofix): """Returns a map of the base contexts for each revision The base contexts determine which lines are considered modified when we - attempt to fix just the modified lines in a file. + attempt to fix just the modified lines in a file. They also determine which + files we attempt to fix, so it is important to compute them even when + --whole is used. """ # The --base flag overrides the usual logic, and we give every revision # exactly the set of baserevs that the user specified.
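The loop added to fix() above commits each revision as soon as all of its file fixes have arrived, even though worker results come back in arbitrary order. A self-contained sketch of that draining pattern, with made-up revision numbers and paths (plain Python, no Mercurial APIs):

    import collections

    # Hypothetical worker results: one (rev, path) pair per fixed file,
    # arriving out of order.
    results = [(2, 'b.txt'), (1, 'a.txt'), (1, 'c.txt'), (3, 'd.txt')]

    numitems = collections.defaultdict(int)
    for rev, path in results:
        numitems[rev] += 1

    commitorder = sorted(numitems, reverse=True)  # smallest rev at the end
    for rev, path in results:
        numitems[rev] -= 1
        # Drain every revision that is now complete at the front of the
        # queue; one result can release several queued revisions at once.
        while commitorder and not numitems[commitorder[-1]]:
            print('commit rev %d' % commitorder.pop())

Run as written, this prints the commits for revisions 1, 2 and 3 in ascending order regardless of how the incoming results were interleaved.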
@@ -484,25 +519,23 @@ def replacerev(ui, repo, ctx, filedata, isexec=fctx.isexec(), copied=copied) - overrides = {('phases', 'new-commit'): ctx.phase()} - with ui.configoverride(overrides, source='fix'): - memctx = context.memctx( - repo, - parents=(newp1node, newp2node), - text=ctx.description(), - files=set(ctx.files()) | set(filedata.keys()), - filectxfn=filectxfn, - user=ctx.user(), - date=ctx.date(), - extra=ctx.extra(), - branch=ctx.branch(), - editor=None) - sucnode = memctx.commit() - prenode = ctx.node() - if prenode == sucnode: - ui.debug('node %s already existed\n' % (ctx.hex())) - else: - replacements[ctx.node()] = sucnode + memctx = context.memctx( + repo, + parents=(newp1node, newp2node), + text=ctx.description(), + files=set(ctx.files()) | set(filedata.keys()), + filectxfn=filectxfn, + user=ctx.user(), + date=ctx.date(), + extra=ctx.extra(), + branch=ctx.branch(), + editor=None) + sucnode = memctx.commit() + prenode = ctx.node() + if prenode == sucnode: + ui.debug('node %s already existed\n' % (ctx.hex())) + else: + replacements[ctx.node()] = sucnode def getfixers(ui): """Returns a map of configured fixer tools indexed by their names diff --git a/hgext/githelp.py b/hgext/githelp.py --- a/hgext/githelp.py +++ b/hgext/githelp.py @@ -67,7 +67,7 @@ def githelp(ui, repo, *args, **kwargs): cmd = args[0] if not cmd in gitcommands: - raise error.Abort("error: unknown git command %s" % (cmd)) + raise error.Abort(_("error: unknown git command %s") % (cmd)) ui.pager('githelp') args = args[1:] @@ -90,14 +90,13 @@ def parseoptions(ui, cmdoptions, args): elif ('-' + ex.opt) in ex.msg: flag = '-' + ex.opt else: - raise error.Abort("unknown option %s" % ex.opt) + raise error.Abort(_("unknown option %s") % ex.opt) try: args.remove(flag) except Exception: - raise error.Abort( - "unknown option {0} packed with other options\n" - "Please try passing the option as it's own flag: -{0}" \ - .format(ex.opt)) + msg = _("unknown option '%s' packed with other options") + hint = _("please try passing the option as its own flag: -%s") + raise error.Abort(msg % ex.opt, hint=hint % ex.opt) ui.warn(_("ignoring unknown option %s\n") % flag) @@ -171,7 +170,7 @@ def add(ui, repo, *args, **kwargs): cmd.extend(args) else: ui.status(_("note: use hg addremove to remove files that have " - "been deleted.\n\n")) + "been deleted\n\n")) ui.status((bytes(cmd)), "\n") @@ -196,7 +195,7 @@ def apply(ui, repo, *args, **kwargs): ui.status((bytes(cmd)), "\n") def bisect(ui, repo, *args, **kwargs): - ui.status(_("See 'hg help bisect' for how to use bisect.\n\n")) + ui.status(_("see 'hg help bisect' for how to use bisect\n\n")) def blame(ui, repo, *args, **kwargs): cmdoptions = [ @@ -236,6 +235,8 @@ def branch(ui, repo, *args, **kwargs): # shell command to output the active bookmark for the active # revision old = '`hg log -T"{activebookmark}" -r .`' + else: + raise error.Abort(_('missing newbranch argument')) new = args[0] cmd['-m'] = old cmd.append(new) @@ -334,7 +335,7 @@ def checkout(ui, repo, *args, **kwargs): cmd = Command('revert') cmd['--all'] = None else: - raise error.Abort("a commit must be specified") + raise error.Abort(_("a commit must be specified")) ui.status((bytes(cmd)), "\n") @@ -353,7 +354,7 @@ def cherrypick(ui, repo, *args, **kwargs if opts.get('continue'): cmd['--continue'] = None elif opts.get('abort'): - ui.status(_("note: hg graft does not have --abort.\n\n")) + ui.status(_("note: hg graft does not have --abort\n\n")) return else: cmd.extend(args) @@ -384,7 +385,7 @@ def clone(ui, repo, *args, 
**kwargs): args, opts = parseoptions(ui, cmdoptions, args) if len(args) == 0: - raise error.Abort("a repository to clone must be specified") + raise error.Abort(_("a repository to clone must be specified")) cmd = Command('clone') cmd.append(args[0]) @@ -393,8 +394,8 @@ def clone(ui, repo, *args, **kwargs): if opts.get('bare'): cmd['-U'] = None - ui.status(_("note: Mercurial does not have bare clones. " + - "-U will clone the repo without checking out a commit\n\n")) + ui.status(_("note: Mercurial does not have bare clones. " + "-U will clone the repo without checking out a commit\n\n")) elif opts.get('no_checkout'): cmd['-U'] = None @@ -436,9 +437,9 @@ def commit(ui, repo, *args, **kwargs): cmd['-m'] = "'%s'" % (opts.get('message'),) if opts.get('all'): - ui.status(_("note: Mercurial doesn't have a staging area, " + - "so there is no --all. -A will add and remove files " + - "for you though.\n\n")) + ui.status(_("note: Mercurial doesn't have a staging area, " + "so there is no --all. -A will add and remove files " + "for you though.\n\n")) if opts.get('file'): cmd['-l'] = opts.get('file') @@ -454,8 +455,8 @@ def commit(ui, repo, *args, **kwargs): ui.status((bytes(cmd)), "\n") def deprecated(ui, repo, *args, **kwargs): - ui.warn(_('This command has been deprecated in the git project, ' + - 'thus isn\'t supported by this tool.\n\n')) + ui.warn(_('this command has been deprecated in the git project, ' + 'thus isn\'t supported by this tool\n\n')) def diff(ui, repo, *args, **kwargs): cmdoptions = [ @@ -468,8 +469,8 @@ def diff(ui, repo, *args, **kwargs): cmd = Command('diff') if opts.get('cached'): - ui.status(_('note: Mercurial has no concept of a staging area, ' + - 'so --cached does nothing.\n\n')) + ui.status(_('note: Mercurial has no concept of a staging area, ' + 'so --cached does nothing\n\n')) if opts.get('reverse'): cmd['--reverse'] = None @@ -505,10 +506,10 @@ def fetch(ui, repo, *args, **kwargs): if len(args) > 0: cmd.append(args[0]) if len(args) > 1: - ui.status(_("note: Mercurial doesn't have refspecs. " + - "-r can be used to specify which commits you want to pull. " + - "-B can be used to specify which bookmark you want to pull." + - "\n\n")) + ui.status(_("note: Mercurial doesn't have refspecs. " + "-r can be used to specify which commits you want to " + "pull. -B can be used to specify which bookmark you " + "want to pull.\n\n")) for v in args[1:]: if v in repo._bookmarks: cmd['-B'] = v @@ -556,10 +557,10 @@ def log(ui, repo, *args, **kwargs): ('p', 'patch', None, ''), ] args, opts = parseoptions(ui, cmdoptions, args) - ui.status(_('note: -v prints the entire commit message like Git does. To ' + - 'print just the first line, drop the -v.\n\n')) - ui.status(_("note: see hg help revset for information on how to filter " + - "log output.\n\n")) + ui.status(_('note: -v prints the entire commit message like Git does. To ' + 'print just the first line, drop the -v.\n\n')) + ui.status(_("note: see hg help revset for information on how to filter " + "log output\n\n")) cmd = Command('log') cmd['-v'] = None @@ -578,13 +579,13 @@ def log(ui, repo, *args, **kwargs): if opts.get('pretty') or opts.get('format') or opts.get('oneline'): format = opts.get('format', '') if 'format:' in format: - ui.status(_("note: --format format:??? equates to Mercurial's " + - "--template. See hg help templates for more info.\n\n")) + ui.status(_("note: --format format:??? equates to Mercurial's " + "--template. See hg help templates for more info.\n\n")) cmd['--template'] = '???' 
else: - ui.status(_("note: --pretty/format/oneline equate to Mercurial's " + - "--style or --template. See hg help templates for more info." + - "\n\n")) + ui.status(_("note: --pretty/format/oneline equate to Mercurial's " + "--style or --template. See hg help templates for " + "more info.\n\n")) cmd['--style'] = '???' if len(args) > 0: @@ -654,8 +655,8 @@ def mergebase(ui, repo, *args, **kwargs) cmd = Command("log -T '{node}\\n' -r 'ancestor(%s,%s)'" % (args[0], args[1])) - ui.status(_('NOTE: ancestors() is part of the revset language.\n'), - _("Learn more about revsets with 'hg help revsets'\n\n")) + ui.status(_('note: ancestors() is part of the revset language\n'), + _("(learn more about revsets with 'hg help revsets')\n\n")) ui.status((bytes(cmd)), "\n") def mergetool(ui, repo, *args, **kwargs): @@ -697,10 +698,10 @@ def pull(ui, repo, *args, **kwargs): if len(args) > 0: cmd.append(args[0]) if len(args) > 1: - ui.status(_("note: Mercurial doesn't have refspecs. " + - "-r can be used to specify which commits you want to pull. " + - "-B can be used to specify which bookmark you want to pull." + - "\n\n")) + ui.status(_("note: Mercurial doesn't have refspecs. " + "-r can be used to specify which commits you want to " + "pull. -B can be used to specify which bookmark you " + "want to pull.\n\n")) for v in args[1:]: if v in repo._bookmarks: cmd['-B'] = v @@ -721,10 +722,10 @@ def push(ui, repo, *args, **kwargs): if len(args) > 0: cmd.append(args[0]) if len(args) > 1: - ui.status(_("note: Mercurial doesn't have refspecs. " + - "-r can be used to specify which commits you want to push. " + - "-B can be used to specify which bookmark you want to push." + - "\n\n")) + ui.status(_("note: Mercurial doesn't have refspecs. " + "-r can be used to specify which commits you want " + "to push. -B can be used to specify which bookmark " + "you want to push.\n\n")) for v in args[1:]: if v in repo._bookmarks: cmd['-B'] = v @@ -748,12 +749,12 @@ def rebase(ui, repo, *args, **kwargs): args, opts = parseoptions(ui, cmdoptions, args) if opts.get('interactive'): - ui.status(_("note: hg histedit does not perform a rebase. " + - "It just edits history.\n\n")) + ui.status(_("note: hg histedit does not perform a rebase. 
" + "It just edits history.\n\n")) cmd = Command('histedit') if len(args) > 0: ui.status(_("also note: 'hg histedit' will automatically detect" - " your stack, so no second argument is necessary.\n\n")) + " your stack, so no second argument is necessary\n\n")) ui.status((bytes(cmd)), "\n") return @@ -769,12 +770,12 @@ def rebase(ui, repo, *args, **kwargs): cmd['--abort'] = None if opts.get('onto'): - ui.status(_("note: if you're trying to lift a commit off one branch, " + - "try hg rebase -d -s " + - "\n\n")) + ui.status(_("note: if you're trying to lift a commit off one branch, " + "try hg rebase -d -s \n\n")) cmd['-d'] = convert(opts.get('onto')) if len(args) < 2: - raise error.Abort("Expected format: git rebase --onto X Y Z") + raise error.Abort(_("expected format: git rebase --onto X Y Z")) cmd['-s'] = "'::%s - ::%s'" % (convert(args[1]), convert(args[0])) else: if len(args) == 1: @@ -799,7 +800,7 @@ def reflog(ui, repo, *args, **kwargs): ui.status(bytes(cmd), "\n\n") ui.status(_("note: in hg commits can be deleted from repo but we always" - " have backups.\n")) + " have backups\n")) def reset(ui, repo, *args, **kwargs): cmdoptions = [ @@ -813,10 +814,10 @@ def reset(ui, repo, *args, **kwargs): hard = opts.get('hard') if opts.get('mixed'): - ui.status(_('NOTE: --mixed has no meaning since Mercurial has no ' + ui.status(_('note: --mixed has no meaning since Mercurial has no ' 'staging area\n\n')) if opts.get('soft'): - ui.status(_('NOTE: --soft has no meaning since Mercurial has no ' + ui.status(_('note: --soft has no meaning since Mercurial has no ' 'staging area\n\n')) cmd = Command('update') @@ -833,7 +834,7 @@ def revert(ui, repo, *args, **kwargs): args, opts = parseoptions(ui, cmdoptions, args) if len(args) > 1: - ui.status(_("note: hg backout doesn't support multiple commits at " + + ui.status(_("note: hg backout doesn't support multiple commits at " "once\n\n")) cmd = Command('backout') @@ -930,8 +931,8 @@ def stash(ui, repo, *args, **kwargs): cmd['--keep'] = None elif (action == 'branch' or action == 'show' or action == 'clear' or action == 'create'): - ui.status(_("note: Mercurial doesn't have equivalents to the " + - "git stash branch, show, clear, or create actions.\n\n")) + ui.status(_("note: Mercurial doesn't have equivalents to the " + "git stash branch, show, clear, or create actions\n\n")) return else: if len(args) > 0: @@ -957,9 +958,11 @@ def status(ui, repo, *args, **kwargs): ui.status((bytes(cmd)), "\n") def svn(ui, repo, *args, **kwargs): + if not args: + raise error.Abort(_('missing svn command')) svncmd = args[0] - if not svncmd in gitsvncommands: - ui.warn(_("error: unknown git svn command %s\n") % (svncmd)) + if svncmd not in gitsvncommands: + raise error.Abort(_('unknown git svn command "%s"') % (svncmd)) args = args[1:] return gitsvncommands[svncmd](ui, repo, *args, **kwargs) @@ -988,6 +991,9 @@ def svnfindrev(ui, repo, *args, **kwargs ] args, opts = parseoptions(ui, cmdoptions, args) + if not args: + raise error.Abort(_('missing find-rev argument')) + cmd = Command('log') cmd['-r'] = args[0] @@ -1020,6 +1026,10 @@ def tag(ui, repo, *args, **kwargs): cmd = Command('tags') else: cmd = Command('tag') + + if not args: + raise error.Abort(_('missing tag argument')) + cmd.append(args[0]) if len(args) > 1: cmd['-r'] = args[1] diff --git a/hgext/gpg.py b/hgext/gpg.py --- a/hgext/gpg.py +++ b/hgext/gpg.py @@ -9,7 +9,6 @@ from __future__ import absolute_import import binascii import os -import tempfile from mercurial.i18n import _ from mercurial import ( @@ -61,11 
+60,11 @@ class gpg(object): sigfile = datafile = None try: # create temporary files - fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig") + fd, sigfile = pycompat.mkstemp(prefix="hg-gpg-", suffix=".sig") fp = os.fdopen(fd, r'wb') fp.write(sig) fp.close() - fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt") + fd, datafile = pycompat.mkstemp(prefix="hg-gpg-", suffix=".txt") fp = os.fdopen(fd, r'wb') fp.write(data) fp.close() diff --git a/hgext/highlight/__init__.py b/hgext/highlight/__init__.py --- a/hgext/highlight/__init__.py +++ b/hgext/highlight/__init__.py @@ -36,7 +36,6 @@ from mercurial.hgweb import ( from mercurial import ( extensions, - fileset, ) # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for @@ -51,9 +50,8 @@ def pygmentize(web, field, fctx, tmpl): filenameonly = web.configbool('web', 'highlightonlymatchfilename', False) ctx = fctx.changectx() - tree = fileset.parse(expr) - mctx = fileset.matchctx(ctx, subset=[fctx.path()], status=None) - if fctx.path() in fileset.getset(mctx, tree): + m = ctx.matchfileset(expr) + if m(fctx.path()): highlight.pygmentize(field, fctx, style, tmpl, guessfilenameonly=filenameonly) diff --git a/hgext/highlight/highlight.py b/hgext/highlight/highlight.py --- a/hgext/highlight/highlight.py +++ b/hgext/highlight/highlight.py @@ -11,7 +11,7 @@ from __future__ import absolute_import from mercurial import demandimport -demandimport.ignore.extend(['pkgutil', 'pkg_resources', '__main__']) +demandimport.IGNORES.update(['pkgutil', 'pkg_resources', '__main__']) from mercurial import ( encoding, @@ -44,7 +44,8 @@ SYNTAX_CSS = ('\n to the syntax highlighting css - old_header = tmpl.load('header') + tmpl.load('header') + old_header = tmpl.cache['header'] if SYNTAX_CSS not in old_header: new_header = old_header + SYNTAX_CSS tmpl.cache['header'] = new_header @@ -89,7 +90,7 @@ def pygmentize(field, fctx, style, tmpl, coloriter = (s.encode(encoding.encoding, 'replace') for s in colorized.splitlines()) - tmpl.filters['colorize'] = lambda x: next(coloriter) + tmpl._filters['colorize'] = lambda x: next(coloriter) oldl = tmpl.cache[field] newl = oldl.replace('line|escape', 'line|colorize') diff --git a/hgext/histedit.py b/hgext/histedit.py --- a/hgext/histedit.py +++ b/hgext/histedit.py @@ -183,7 +183,6 @@ unexpectedly:: from __future__ import absolute_import -import errno import os from mercurial.i18n import _ @@ -207,6 +206,7 @@ from mercurial import ( registrar, repair, scmutil, + state as statemod, util, ) from mercurial.utils import ( @@ -304,6 +304,7 @@ class histeditstate(object): self.lock = lock self.wlock = wlock self.backupfile = None + self.stateobj = statemod.cmdstate(repo, 'histedit-state') if replacements is None: self.replacements = [] else: @@ -311,29 +312,33 @@ class histeditstate(object): def read(self): """Load histedit state from disk and set fields appropriately.""" - try: - state = self.repo.vfs.read('histedit-state') - except IOError as err: - if err.errno != errno.ENOENT: - raise + if not self.stateobj.exists(): cmdutil.wrongtooltocontinue(self.repo, _('histedit')) - if state.startswith('v1\n'): + data = self._read() + + self.parentctxnode = data['parentctxnode'] + actions = parserules(data['rules'], self) + self.actions = actions + self.keep = data['keep'] + self.topmost = data['topmost'] + self.replacements = data['replacements'] + self.backupfile = data['backupfile'] + + def _read(self): + fp = self.repo.vfs.read('histedit-state') + if fp.startswith('v1\n'): data = self._load() 
parentctxnode, rules, keep, topmost, replacements, backupfile = data else: - data = pickle.loads(state) + data = pickle.loads(fp) parentctxnode, rules, keep, topmost, replacements = data backupfile = None + rules = "\n".join(["%s %s" % (verb, rest) for [verb, rest] in rules]) - self.parentctxnode = parentctxnode - rules = "\n".join(["%s %s" % (verb, rest) for [verb, rest] in rules]) - actions = parserules(rules, self) - self.actions = actions - self.keep = keep - self.topmost = topmost - self.replacements = replacements - self.backupfile = backupfile + return {'parentctxnode': parentctxnode, "rules": rules, "keep": keep, + "topmost": topmost, "replacements": replacements, + "backupfile": backupfile} def write(self, tr=None): if tr: @@ -779,9 +784,7 @@ class fold(histeditaction): def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges): parent = ctx.parents()[0].node() - repo.ui.pushbuffer() - hg.update(repo, parent) - repo.ui.popbuffer() + hg.updaterepo(repo, parent, overwrite=False) ### prepare new commit data commitopts = {} commitopts['user'] = ctx.user() @@ -812,9 +815,7 @@ class fold(histeditaction): skipprompt=self.skipprompt()) if n is None: return ctx, [] - repo.ui.pushbuffer() - hg.update(repo, n) - repo.ui.popbuffer() + hg.updaterepo(repo, n, overwrite=False) replacements = [(oldctx.node(), (newnode,)), (ctx.node(), (n,)), (newnode, (n,)), @@ -1109,6 +1110,8 @@ def _histedit(ui, repo, state, *freeargs fm.startitem() goal = _getgoal(opts) revs = opts.get('rev', []) + # experimental config: ui.history-editing-backup + nobackup = not ui.configbool('ui', 'history-editing-backup') rules = opts.get('commands', '') state.keep = opts.get('keep', False) @@ -1122,7 +1125,7 @@ def _histedit(ui, repo, state, *freeargs _edithisteditplan(ui, repo, state, rules) return elif goal == goalabort: - _aborthistedit(ui, repo, state) + _aborthistedit(ui, repo, state, nobackup=nobackup) return else: # goal == goalnew @@ -1149,8 +1152,6 @@ def _continuehistedit(ui, repo, state): # even if there's an exception before the first transaction serialize. state.write() - total = len(state.actions) - pos = 0 tr = None # Don't use singletransaction by default since it rolls the entire # transaction back if an unexpected exception happens (like a @@ -1160,13 +1161,13 @@ def _continuehistedit(ui, repo, state): # and reopen a transaction. For example, if the action executes an # external process it may choose to commit the transaction first. 
tr = repo.transaction('histedit') - with util.acceptintervention(tr): + progress = ui.makeprogress(_("editing"), unit=_('changes'), + total=len(state.actions)) + with progress, util.acceptintervention(tr): while state.actions: state.write(tr=tr) actobj = state.actions[0] - pos += 1 - ui.progress(_("editing"), pos, actobj.torule(), - _('changes'), total) + progress.increment(item=actobj.torule()) ui.debug('histedit: processing %s %s\n' % (actobj.verb,\ actobj.torule())) parentctx, replacement_ = actobj.run() @@ -1175,13 +1176,10 @@ def _continuehistedit(ui, repo, state): state.actions.pop(0) state.write() - ui.progress(_("editing"), None) def _finishhistedit(ui, repo, state, fm): """This action runs when histedit is finishing its session""" - repo.ui.pushbuffer() - hg.update(repo, state.parentctxnode, quietempty=True) - repo.ui.popbuffer() + hg.updaterepo(repo, state.parentctxnode, overwrite=False) mapping, tmpnodes, created, ntm = processreplacement(state) if mapping: @@ -1225,7 +1223,7 @@ def _finishhistedit(ui, repo, state, fm) if repo.vfs.exists('histedit-last-edit.txt'): repo.vfs.unlink('histedit-last-edit.txt') -def _aborthistedit(ui, repo, state): +def _aborthistedit(ui, repo, state, nobackup=False): try: state.read() __, leafs, tmpnodes, __ = processreplacement(state) @@ -1247,8 +1245,8 @@ def _aborthistedit(ui, repo, state): if repo.unfiltered().revs('parents() and (%n or %ln::)', state.parentctxnode, leafs | tmpnodes): hg.clean(repo, state.topmost, show_stats=True, quietempty=True) - cleanupnode(ui, repo, tmpnodes) - cleanupnode(ui, repo, leafs) + cleanupnode(ui, repo, tmpnodes, nobackup=nobackup) + cleanupnode(ui, repo, leafs, nobackup=nobackup) except Exception: if state.inprogress(): ui.warn(_('warning: encountered an exception during histedit ' @@ -1605,7 +1603,7 @@ def movetopmostbookmarks(repo, oldtopmos changes.append((name, newtopmost)) marks.applychanges(repo, tr, changes) -def cleanupnode(ui, repo, nodes): +def cleanupnode(ui, repo, nodes, nobackup=False): """strip a group of nodes from the repository The set of node to strip may contains unknown nodes.""" @@ -1620,7 +1618,8 @@ def cleanupnode(ui, repo, nodes): nodes = sorted(n for n in nodes if n in nm) roots = [c.node() for c in repo.set("roots(%ln)", nodes)] if roots: - repair.strip(ui, repo, roots) + backup = not nobackup + repair.strip(ui, repo, roots, backup=backup) def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs): if isinstance(nodelist, str): diff --git a/hgext/infinitepush/__init__.py b/hgext/infinitepush/__init__.py --- a/hgext/infinitepush/__init__.py +++ b/hgext/infinitepush/__init__.py @@ -94,7 +94,6 @@ import random import re import socket import subprocess -import tempfile import time from mercurial.node import ( @@ -565,19 +564,19 @@ def _lookupwrap(orig): if isinstance(localkey, str) and _scratchbranchmatcher(localkey): scratchnode = repo.bundlestore.index.getnode(localkey) if scratchnode: - return "%s %s\n" % (1, scratchnode) + return "%d %s\n" % (1, scratchnode) else: - return "%s %s\n" % (0, 'scratch branch %s not found' % localkey) + return "%d %s\n" % (0, 'scratch branch %s not found' % localkey) else: try: r = hex(repo.lookup(localkey)) - return "%s %s\n" % (1, r) + return "%d %s\n" % (1, r) except Exception as inst: if repo.bundlestore.index.getbundle(localkey): - return "%s %s\n" % (1, localkey) + return "%d %s\n" % (1, localkey) else: - r = str(inst) - return "%s %s\n" % (0, r) + r = stringutil.forcebytestr(inst) + return "%d %s\n" % (0, r) return _lookup def _pull(orig, ui, repo, 
source="default", **opts): @@ -912,7 +911,7 @@ def storetobundlestore(orig, repo, op, u # storing the bundle in the bundlestore buf = util.chunkbuffer(bundler.getchunks()) - fd, bundlefile = tempfile.mkstemp() + fd, bundlefile = pycompat.mkstemp() try: try: fp = os.fdopen(fd, r'wb') @@ -998,7 +997,7 @@ def processparts(orig, repo, op, unbundl # If commits were sent, store them if cgparams: buf = util.chunkbuffer(bundler.getchunks()) - fd, bundlefile = tempfile.mkstemp() + fd, bundlefile = pycompat.mkstemp() try: try: fp = os.fdopen(fd, r'wb') @@ -1110,7 +1109,7 @@ def bundle2scratchbranch(op, part): bundler.addpart(cgpart) buf = util.chunkbuffer(bundler.getchunks()) - fd, bundlefile = tempfile.mkstemp() + fd, bundlefile = pycompat.mkstemp() try: try: fp = os.fdopen(fd, r'wb') diff --git a/hgext/infinitepush/common.py b/hgext/infinitepush/common.py --- a/hgext/infinitepush/common.py +++ b/hgext/infinitepush/common.py @@ -6,13 +6,13 @@ from __future__ import absolute_import import os -import tempfile from mercurial.node import hex from mercurial import ( error, extensions, + pycompat, ) def isremotebooksenabled(ui): @@ -30,7 +30,7 @@ def downloadbundle(repo, unknownbinhead) def _makebundlefromraw(data): fp = None - fd, bundlefile = tempfile.mkstemp() + fd, bundlefile = pycompat.mkstemp() try: # guards bundlefile try: # guards fp fp = os.fdopen(fd, 'wb') diff --git a/hgext/infinitepush/store.py b/hgext/infinitepush/store.py --- a/hgext/infinitepush/store.py +++ b/hgext/infinitepush/store.py @@ -120,6 +120,8 @@ class externalbundlestore(abstractbundle def write(self, data): # Won't work on windows because you can't open file second time without # closing it + # TODO: rewrite without str.format() and replace NamedTemporaryFile() + # with pycompat.namedtempfile() with NamedTemporaryFile() as temp: temp.write(data) temp.flush() @@ -142,6 +144,8 @@ class externalbundlestore(abstractbundle def read(self, handle): # Won't work on windows because you can't open file second time without # closing it + # TODO: rewrite without str.format() and replace NamedTemporaryFile() + # with pycompat.namedtempfile() with NamedTemporaryFile() as temp: formatted_args = [arg.format(filename=temp.name, handle=handle) for arg in self.get_args] diff --git a/hgext/keyword.py b/hgext/keyword.py --- a/hgext/keyword.py +++ b/hgext/keyword.py @@ -87,7 +87,6 @@ from __future__ import absolute_import import os import re -import tempfile import weakref from mercurial.i18n import _ @@ -246,7 +245,7 @@ class kwtemplater(object): @util.propertycache def escape(self): '''Returns bar-separated and escaped keywords.''' - return '|'.join(map(re.escape, self.templates.keys())) + return '|'.join(map(stringutil.reescape, self.templates.keys())) @util.propertycache def rekw(self): @@ -434,7 +433,7 @@ def demo(ui, repo, *args, **opts): ui.write('%s = %s\n' % (k, v)) fn = 'demo.txt' - tmpdir = tempfile.mkdtemp('', 'kwdemo.') + tmpdir = pycompat.mkdtemp('', 'kwdemo.') ui.note(_('creating temporary repository at %s\n') % tmpdir) if repo is None: baseui = ui diff --git a/hgext/largefiles/basestore.py b/hgext/largefiles/basestore.py --- a/hgext/largefiles/basestore.py +++ b/hgext/largefiles/basestore.py @@ -62,9 +62,10 @@ class basestore(object): at = 0 available = self.exists(set(hash for (_filename, hash) in files)) + progress = ui.makeprogress(_('getting largefiles'), unit=_('files'), + total=len(files)) for filename, hash in files: - ui.progress(_('getting largefiles'), at, unit=_('files'), - total=len(files)) + progress.update(at) at += 1 
ui.note(_('getting %s:%s\n') % (filename, hash)) @@ -79,7 +80,7 @@ class basestore(object): else: missing.append(filename) - ui.progress(_('getting largefiles'), None) + progress.complete() return (success, missing) def _gethash(self, filename, hash): diff --git a/hgext/largefiles/lfcommands.py b/hgext/largefiles/lfcommands.py --- a/hgext/largefiles/lfcommands.py +++ b/hgext/largefiles/lfcommands.py @@ -118,12 +118,14 @@ def lfconvert(ui, src, dest, *pats, **op matcher = None lfiletohash = {} + progress = ui.makeprogress(_('converting revisions'), + unit=_('revisions'), + total=rsrc['tip'].rev()) for ctx in ctxs: - ui.progress(_('converting revisions'), ctx.rev(), - unit=_('revisions'), total=rsrc['tip'].rev()) + progress.update(ctx.rev()) _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash) - ui.progress(_('converting revisions'), None) + progress.complete() if rdst.wvfs.exists(lfutil.shortname): rdst.wvfs.rmtree(lfutil.shortname) @@ -368,9 +370,10 @@ def uploadlfiles(ui, rsrc, rdst, files): files = [h for h in files if not retval[h]] ui.debug("%d largefiles need to be uploaded\n" % len(files)) + progress = ui.makeprogress(_('uploading largefiles'), unit=_('files'), + total=len(files)) for hash in files: - ui.progress(_('uploading largefiles'), at, unit=_('files'), - total=len(files)) + progress.update(at) source = lfutil.findfile(rsrc, hash) if not source: raise error.Abort(_('largefile %s missing from store' @@ -378,7 +381,7 @@ def uploadlfiles(ui, rsrc, rdst, files): # XXX check for errors here store.put(source, hash) at += 1 - ui.progress(_('uploading largefiles'), None) + progress.complete() def verifylfiles(ui, repo, all=False, contents=False): '''Verify that every largefile revision in the current changeset diff --git a/hgext/largefiles/lfutil.py b/hgext/largefiles/lfutil.py --- a/hgext/largefiles/lfutil.py +++ b/hgext/largefiles/lfutil.py @@ -501,9 +501,10 @@ def getlfilestoupdate(oldstandins, newst return filelist def getlfilestoupload(repo, missing, addfunc): + progress = repo.ui.makeprogress(_('finding outgoing largefiles'), + unit=_('revisions'), total=len(missing)) for i, n in enumerate(missing): - repo.ui.progress(_('finding outgoing largefiles'), i, - unit=_('revisions'), total=len(missing)) + progress.update(i) parents = [p for p in repo[n].parents() if p != node.nullid] oldlfstatus = repo.lfstatus @@ -530,7 +531,7 @@ def getlfilestoupload(repo, missing, add for fn in files: if isstandin(fn) and fn in ctx: addfunc(fn, readasstandin(ctx[fn])) - repo.ui.progress(_('finding outgoing largefiles'), None) + progress.complete() def updatestandinsbymatch(repo, match): '''Update standins in the working directory according to specified match diff --git a/hgext/lfs/__init__.py b/hgext/lfs/__init__.py --- a/hgext/lfs/__init__.py +++ b/hgext/lfs/__init__.py @@ -362,8 +362,10 @@ def lfsfileset(mctx, x): """File that uses LFS storage.""" # i18n: "lfs" is a keyword fileset.getargs(x, 0, 0, _("lfs takes no arguments")) - return [f for f in mctx.subset - if wrapper.pointerfromctx(mctx.ctx, f, removed=True) is not None] + ctx = mctx.ctx + def lfsfilep(f): + return wrapper.pointerfromctx(ctx, f, removed=True) is not None + return mctx.predicate(lfsfilep, predrepr='') @templatekeyword('lfs_files', requires={'ctx'}) def lfsfiles(context, mapping): diff --git a/hgext/lfs/blobstore.py b/hgext/lfs/blobstore.py --- a/hgext/lfs/blobstore.py +++ b/hgext/lfs/blobstore.py @@ -405,7 +405,8 @@ class _gitlfsremote(object): if len(objects) > 1: 
self.ui.note(_('lfs: need to transfer %d objects (%s)\n') % (len(objects), util.bytecount(total))) - self.ui.progress(topic, 0, total=total) + progress = self.ui.makeprogress(topic, total=total) + progress.update(0) def transfer(chunk): for obj in chunk: objsize = obj.get('size', 0) @@ -443,9 +444,9 @@ class _gitlfsremote(object): for _one, oid in oids: processed += sizes[oid] blobs += 1 - self.ui.progress(topic, processed, total=total) + progress.update(processed) self.ui.note(_('lfs: processed: %s\n') % oid) - self.ui.progress(topic, pos=None, total=total) + progress.complete() if blobs > 0: if action == 'upload': diff --git a/hgext/lfs/pointer.py b/hgext/lfs/pointer.py --- a/hgext/lfs/pointer.py +++ b/hgext/lfs/pointer.py @@ -15,6 +15,9 @@ from mercurial import ( error, pycompat, ) +from mercurial.utils import ( + stringutil, +) class InvalidPointer(error.RevlogError): pass @@ -32,7 +35,8 @@ class gitlfspointer(dict): try: return cls(l.split(' ', 1) for l in text.splitlines()).validate() except ValueError: # l.split returns 1 item instead of 2 - raise InvalidPointer(_('cannot parse git-lfs text: %r') % text) + raise InvalidPointer(_('cannot parse git-lfs text: %s') + % stringutil.pprint(text)) def serialize(self): sortkeyfunc = lambda x: (x[0] != 'version', x) @@ -52,7 +56,7 @@ class gitlfspointer(dict): _requiredre = { 'size': re.compile(br'\A[0-9]+\Z'), 'oid': re.compile(br'\Asha256:[0-9a-f]{64}\Z'), - 'version': re.compile(br'\A%s\Z' % re.escape(VERSION)), + 'version': re.compile(br'\A%s\Z' % stringutil.reescape(VERSION)), } def validate(self): @@ -61,15 +65,19 @@ class gitlfspointer(dict): for k, v in self.iteritems(): if k in self._requiredre: if not self._requiredre[k].match(v): - raise InvalidPointer(_('unexpected value: %s=%r') % (k, v)) + raise InvalidPointer( + _('unexpected lfs pointer value: %s=%s') + % (k, stringutil.pprint(v))) requiredcount += 1 elif not self._keyre.match(k): - raise InvalidPointer(_('unexpected key: %s') % k) + raise InvalidPointer(_('unexpected lfs pointer key: %s') % k) if not self._valuere.match(v): - raise InvalidPointer(_('unexpected value: %s=%r') % (k, v)) + raise InvalidPointer(_('unexpected lfs pointer value: %s=%s') + % (k, stringutil.pprint(v))) if len(self._requiredre) != requiredcount: miss = sorted(set(self._requiredre.keys()).difference(self.keys())) - raise InvalidPointer(_('missed keys: %s') % ', '.join(miss)) + raise InvalidPointer(_('missing lfs pointer keys: %s') + % ', '.join(miss)) return self deserialize = gitlfspointer.deserialize diff --git a/hgext/mq.py b/hgext/mq.py --- a/hgext/mq.py +++ b/hgext/mq.py @@ -492,7 +492,8 @@ class queue(object): n, name = entry yield statusentry(bin(n), name) elif l.strip(): - self.ui.warn(_('malformated mq status line: %s\n') % entry) + self.ui.warn(_('malformated mq status line: %s\n') % + stringutil.pprint(entry)) # else we ignore empty lines try: lines = self.opener.read(self.statuspath).splitlines() @@ -2872,7 +2873,7 @@ def guard(ui, repo, *args, **opts): patch = None args = list(args) if opts.get(r'list'): - if args or opts.get('none'): + if args or opts.get(r'none'): raise error.Abort(_('cannot mix -l/--list with options or ' 'arguments')) for i in xrange(len(q.series)): @@ -2886,7 +2887,7 @@ def guard(ui, repo, *args, **opts): patch = args.pop(0) if patch is None: raise error.Abort(_('no patch to work with')) - if args or opts.get('none'): + if args or opts.get(r'none'): idx = q.findseries(patch) if idx is None: raise error.Abort(_('no patch named %s') % patch) diff --git 
a/hgext/narrow/__init__.py b/hgext/narrow/__init__.py --- a/hgext/narrow/__init__.py +++ b/hgext/narrow/__init__.py @@ -28,8 +28,6 @@ from . import ( narrowchangegroup, narrowcommands, narrowcopies, - narrowdirstate, - narrowmerge, narrowpatch, narrowrepo, narrowrevlog, @@ -64,7 +62,6 @@ def uisetup(ui): localrepo.featuresetupfuncs.add(featuresetup) narrowrevlog.setup() narrowbundle2.setup() - narrowmerge.setup() narrowcommands.setup() narrowchangegroup.setup() narrowwirepeer.uisetup() @@ -74,10 +71,9 @@ def reposetup(ui, repo): if not repo.local(): return - narrowrepo.wraprepo(repo) if changegroup.NARROW_REQUIREMENT in repo.requirements: + narrowrepo.wraprepo(repo) narrowcopies.setup(repo) - narrowdirstate.setup(repo) narrowpatch.setup(repo) narrowwirepeer.reposetup(repo) diff --git a/hgext/narrow/narrowbundle2.py b/hgext/narrow/narrowbundle2.py --- a/hgext/narrow/narrowbundle2.py +++ b/hgext/narrow/narrowbundle2.py @@ -408,6 +408,8 @@ def _handlechangespec(op, inpart): topic='widen') repo._bookmarks = bmstore if chgrpfile: + op._widen_uninterr = repo.ui.uninterruptable() + op._widen_uninterr.__enter__() # presence of _widen_bundle attribute activates widen handler later op._widen_bundle = chgrpfile # Set the new narrowspec if we're widening. The setnewnarrowpats() method @@ -455,6 +457,7 @@ def handlechangegroup_widen(op, inpart): (undovfs.join(undofile), stringutil.forcebytestr(e))) # Remove partial backup only if there were no exceptions + op._widen_uninterr.__exit__(None, None, None) vfs.unlink(chgrpfile) def setup(): diff --git a/hgext/narrow/narrowcommands.py b/hgext/narrow/narrowcommands.py --- a/hgext/narrow/narrowcommands.py +++ b/hgext/narrow/narrowcommands.py @@ -203,50 +203,51 @@ def _narrow(ui, repo, remote, commoninc, hint=_('use --force-delete-local-changes to ' 'ignore')) - if revstostrip: - tostrip = [unfi.changelog.node(r) for r in revstostrip] - if repo['.'].node() in tostrip: - # stripping working copy, so move to a different commit first - urev = max(repo.revs('(::%n) - %ln + null', - repo['.'].node(), visibletostrip)) - hg.clean(repo, urev) - repair.strip(ui, unfi, tostrip, topic='narrow') + with ui.uninterruptable(): + if revstostrip: + tostrip = [unfi.changelog.node(r) for r in revstostrip] + if repo['.'].node() in tostrip: + # stripping working copy, so move to a different commit first + urev = max(repo.revs('(::%n) - %ln + null', + repo['.'].node(), visibletostrip)) + hg.clean(repo, urev) + repair.strip(ui, unfi, tostrip, topic='narrow') - todelete = [] - for f, f2, size in repo.store.datafiles(): - if f.startswith('data/'): - file = f[5:-2] - if not newmatch(file): - todelete.append(f) - elif f.startswith('meta/'): - dir = f[5:-13] - dirs = ['.'] + sorted(util.dirs({dir})) + [dir] - include = True - for d in dirs: - visit = newmatch.visitdir(d) - if not visit: - include = False - break - if visit == 'all': - break - if not include: - todelete.append(f) + todelete = [] + for f, f2, size in repo.store.datafiles(): + if f.startswith('data/'): + file = f[5:-2] + if not newmatch(file): + todelete.append(f) + elif f.startswith('meta/'): + dir = f[5:-13] + dirs = ['.'] + sorted(util.dirs({dir})) + [dir] + include = True + for d in dirs: + visit = newmatch.visitdir(d) + if not visit: + include = False + break + if visit == 'all': + break + if not include: + todelete.append(f) - repo.destroying() + repo.destroying() - with repo.transaction("narrowing"): - for f in todelete: - ui.status(_('deleting %s\n') % f) - util.unlinkpath(repo.svfs.join(f)) - 
repo.store.markremoved(f) + with repo.transaction("narrowing"): + for f in todelete: + ui.status(_('deleting %s\n') % f) + util.unlinkpath(repo.svfs.join(f)) + repo.store.markremoved(f) - for f in repo.dirstate: - if not newmatch(f): - repo.dirstate.drop(f) - repo.wvfs.unlinkpath(f) - repo.setnarrowpats(newincludes, newexcludes) + for f in repo.dirstate: + if not newmatch(f): + repo.dirstate.drop(f) + repo.wvfs.unlinkpath(f) + repo.setnarrowpats(newincludes, newexcludes) - repo.destroyed() + repo.destroyed() def _widen(ui, repo, remote, commoninc, newincludes, newexcludes): newmatch = narrowspec.match(repo.root, newincludes, newexcludes) @@ -269,28 +270,29 @@ def _widen(ui, repo, remote, commoninc, repo.setnarrowpats(newincludes, newexcludes) repo.setnewnarrowpats = setnewnarrowpats - ds = repo.dirstate - p1, p2 = ds.p1(), ds.p2() - with ds.parentchange(): - ds.setparents(node.nullid, node.nullid) - common = commoninc[0] - with wrappedextraprepare: - exchange.pull(repo, remote, heads=common) - with ds.parentchange(): - ds.setparents(p1, p2) + with ui.uninterruptable(): + ds = repo.dirstate + p1, p2 = ds.p1(), ds.p2() + with ds.parentchange(): + ds.setparents(node.nullid, node.nullid) + common = commoninc[0] + with wrappedextraprepare: + exchange.pull(repo, remote, heads=common) + with ds.parentchange(): + ds.setparents(p1, p2) - actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()} - addgaction = actions['g'].append + actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()} + addgaction = actions['g'].append - mf = repo['.'].manifest().matches(newmatch) - for f, fn in mf.iteritems(): - if f not in repo.dirstate: - addgaction((f, (mf.flags(f), False), - "add from widened narrow clone")) + mf = repo['.'].manifest().matches(newmatch) + for f, fn in mf.iteritems(): + if f not in repo.dirstate: + addgaction((f, (mf.flags(f), False), + "add from widened narrow clone")) - merge.applyupdates(repo, actions, wctx=repo[None], - mctx=repo['.'], overwrite=False) - merge.recordupdates(repo, actions, branchmerge=False) + merge.applyupdates(repo, actions, wctx=repo[None], + mctx=repo['.'], overwrite=False) + merge.recordupdates(repo, actions, branchmerge=False) # TODO(rdamazio): Make new matcher format and update description @command('tracked', diff --git a/hgext/narrow/narrowdirstate.py b/hgext/narrow/narrowdirstate.py --- a/hgext/narrow/narrowdirstate.py +++ b/hgext/narrow/narrowdirstate.py @@ -9,74 +9,91 @@ from __future__ import absolute_import from mercurial.i18n import _ from mercurial import ( - dirstate, error, - extensions, match as matchmod, narrowspec, util as hgutil, ) -def setup(repo): +def wrapdirstate(repo, dirstate): """Add narrow spec dirstate ignore, block changes outside narrow spec.""" - def walk(orig, self, match, subrepos, unknown, ignored, full=True, - narrowonly=True): - if narrowonly: - # hack to not exclude explicitly-specified paths so that they can - # be warned later on e.g. 
dirstate.add() - em = matchmod.exact(match._root, match._cwd, match.files()) - nm = matchmod.unionmatcher([repo.narrowmatch(), em]) - match = matchmod.intersectmatchers(match, nm) - return orig(self, match, subrepos, unknown, ignored, full) - - extensions.wrapfunction(dirstate.dirstate, 'walk', walk) - - # Prevent adding files that are outside the sparse checkout - editfuncs = ['normal', 'add', 'normallookup', 'copy', 'remove', 'merge'] - for func in editfuncs: - def _wrapper(orig, self, *args): + def _editfunc(fn): + def _wrapper(self, *args): dirstate = repo.dirstate narrowmatch = repo.narrowmatch() for f in args: if f is not None and not narrowmatch(f) and f not in dirstate: raise error.Abort(_("cannot track '%s' - it is outside " + "the narrow clone") % f) - return orig(self, *args) - extensions.wrapfunction(dirstate.dirstate, func, _wrapper) - - def filterrebuild(orig, self, parent, allfiles, changedfiles=None): - if changedfiles is None: - # Rebuilding entire dirstate, let's filter allfiles to match the - # narrowspec. - allfiles = [f for f in allfiles if repo.narrowmatch()(f)] - orig(self, parent, allfiles, changedfiles) - - extensions.wrapfunction(dirstate.dirstate, 'rebuild', filterrebuild) + return fn(self, *args) + return _wrapper def _narrowbackupname(backupname): assert 'dirstate' in backupname return backupname.replace('dirstate', narrowspec.FILENAME) - def restorebackup(orig, self, tr, backupname): - self._opener.rename(_narrowbackupname(backupname), narrowspec.FILENAME, - checkambig=True) - orig(self, tr, backupname) + class narrowdirstate(dirstate.__class__): + def walk(self, match, subrepos, unknown, ignored, full=True, + narrowonly=True): + if narrowonly: + # hack to not exclude explicitly-specified paths so that they + # can be warned later on e.g. dirstate.add() + em = matchmod.exact(match._root, match._cwd, match.files()) + nm = matchmod.unionmatcher([repo.narrowmatch(), em]) + match = matchmod.intersectmatchers(match, nm) + return super(narrowdirstate, self).walk(match, subrepos, unknown, + ignored, full) - extensions.wrapfunction(dirstate.dirstate, 'restorebackup', restorebackup) + # Prevent adding/editing/copying/deleting files that are outside the + # sparse checkout + @_editfunc + def normal(self, *args): + return super(narrowdirstate, self).normal(*args) - def savebackup(orig, self, tr, backupname): - orig(self, tr, backupname) + @_editfunc + def add(self, *args): + return super(narrowdirstate, self).add(*args) + + @_editfunc + def normallookup(self, *args): + return super(narrowdirstate, self).normallookup(*args) + + @_editfunc + def copy(self, *args): + return super(narrowdirstate, self).copy(*args) - narrowbackupname = _narrowbackupname(backupname) - self._opener.tryunlink(narrowbackupname) - hgutil.copyfile(self._opener.join(narrowspec.FILENAME), - self._opener.join(narrowbackupname), hardlink=True) + @_editfunc + def remove(self, *args): + return super(narrowdirstate, self).remove(*args) + + @_editfunc + def merge(self, *args): + return super(narrowdirstate, self).merge(*args) + + def rebuild(self, parent, allfiles, changedfiles=None): + if changedfiles is None: + # Rebuilding entire dirstate, let's filter allfiles to match the + # narrowspec. 
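# (Illustrative aside: rather than extensions.wrapfunction(), this
# rewrite wraps the live instance by swapping in a dynamically created
# subclass, roughly:
#
#     class narrowdirstate(dirstate.__class__):
#         ...  # overrides that call super(narrowdirstate, self)
#     dirstate.__class__ = narrowdirstate
#
# so each override can delegate via super() instead of threading an
# `orig` callable through every wrapper.)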
+ allfiles = [f for f in allfiles if repo.narrowmatch()(f)] + super(narrowdirstate, self).rebuild(parent, allfiles, changedfiles) - extensions.wrapfunction(dirstate.dirstate, 'savebackup', savebackup) + def restorebackup(self, tr, backupname): + self._opener.rename(_narrowbackupname(backupname), + narrowspec.FILENAME, checkambig=True) + super(narrowdirstate, self).restorebackup(tr, backupname) + + def savebackup(self, tr, backupname): + super(narrowdirstate, self).savebackup(tr, backupname) - def clearbackup(orig, self, tr, backupname): - orig(self, tr, backupname) - self._opener.unlink(_narrowbackupname(backupname)) + narrowbackupname = _narrowbackupname(backupname) + self._opener.tryunlink(narrowbackupname) + hgutil.copyfile(self._opener.join(narrowspec.FILENAME), + self._opener.join(narrowbackupname), hardlink=True) - extensions.wrapfunction(dirstate.dirstate, 'clearbackup', clearbackup) + def clearbackup(self, tr, backupname): + super(narrowdirstate, self).clearbackup(tr, backupname) + self._opener.unlink(_narrowbackupname(backupname)) + + dirstate.__class__ = narrowdirstate + return dirstate diff --git a/hgext/narrow/narrowmerge.py b/hgext/narrow/narrowmerge.py deleted file mode 100644 --- a/hgext/narrow/narrowmerge.py +++ /dev/null @@ -1,77 +0,0 @@ -# narrowmerge.py - extensions to mercurial merge module to support narrow clones -# -# Copyright 2017 Google, Inc. -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from __future__ import absolute_import - -from mercurial.i18n import _ -from mercurial import ( - copies, - error, - extensions, - merge, -) - -def setup(): - def _manifestmerge(orig, repo, wctx, p2, pa, branchmerge, *args, **kwargs): - """Filter updates to only lay out files that match the narrow spec.""" - actions, diverge, renamedelete = orig( - repo, wctx, p2, pa, branchmerge, *args, **kwargs) - - narrowmatch = repo.narrowmatch() - if narrowmatch.always(): - return actions, diverge, renamedelete - - nooptypes = set(['k']) # TODO: handle with nonconflicttypes - nonconflicttypes = set('a am c cm f g r e'.split()) - # We mutate the items in the dict during iteration, so iterate - # over a copy. 
- for f, action in list(actions.items()): - if narrowmatch(f): - pass - elif not branchmerge: - del actions[f] # just updating, ignore changes outside clone - elif action[0] in nooptypes: - del actions[f] # merge does not affect file - elif action[0] in nonconflicttypes: - raise error.Abort(_('merge affects file \'%s\' outside narrow, ' - 'which is not yet supported') % f, - hint=_('merging in the other direction ' - 'may work')) - else: - raise error.Abort(_('conflict in file \'%s\' is outside ' - 'narrow clone') % f) - - return actions, diverge, renamedelete - - extensions.wrapfunction(merge, 'manifestmerge', _manifestmerge) - - def _checkcollision(orig, repo, wmf, actions): - narrowmatch = repo.narrowmatch() - if not narrowmatch.always(): - wmf = wmf.matches(narrowmatch) - if actions: - narrowactions = {} - for m, actionsfortype in actions.iteritems(): - narrowactions[m] = [] - for (f, args, msg) in actionsfortype: - if narrowmatch(f): - narrowactions[m].append((f, args, msg)) - actions = narrowactions - return orig(repo, wmf, actions) - - extensions.wrapfunction(merge, '_checkcollision', _checkcollision) - - def _computenonoverlap(orig, repo, *args, **kwargs): - u1, u2 = orig(repo, *args, **kwargs) - narrowmatch = repo.narrowmatch() - if narrowmatch.always(): - return u1, u2 - - u1 = [f for f in u1 if narrowmatch(f)] - u2 = [f for f in u2 if narrowmatch(f)] - return u1, u2 - extensions.wrapfunction(copies, '_computenonoverlap', _computenonoverlap) diff --git a/hgext/narrow/narrowrepo.py b/hgext/narrow/narrowrepo.py --- a/hgext/narrow/narrowrepo.py +++ b/hgext/narrow/narrowrepo.py @@ -15,6 +15,7 @@ from mercurial import ( ) from . import ( + narrowdirstate, narrowrevlog, ) @@ -62,4 +63,8 @@ def wraprepo(repo): return scmutil.status(modified, added, removed, deleted, unknown, ignored, clean) + def _makedirstate(self): + dirstate = super(narrowrepository, self)._makedirstate() + return narrowdirstate.wrapdirstate(self, dirstate) + repo.__class__ = narrowrepository diff --git a/hgext/notify.py b/hgext/notify.py --- a/hgext/notify.py +++ b/hgext/notify.py @@ -113,6 +113,9 @@ notify.maxsubject notify.diffstat Set to True to include a diffstat before diff content. Default: True. +notify.showfunc + If set, override ``diff.showfunc`` for the diff content. Default: None. + notify.merge If True, send notifications for merge changesets. Default: True. 
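For example, to include function names in the diffs that notify sends
without changing ``diff.showfunc`` globally (a sample hgrc excerpt)::

  [notify]
  showfunc = True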
@@ -206,6 +209,9 @@ configitem('notify', 'outgoing', configitem('notify', 'sources', default='serve', ) +configitem('notify', 'showfunc', + default=None, +) configitem('notify', 'strip', default=0, ) @@ -260,6 +266,9 @@ class notifier(object): self.charsets = mail._charsets(self.ui) self.subs = self.subscribers() self.merge = self.ui.configbool('notify', 'merge') + self.showfunc = self.ui.configbool('notify', 'showfunc') + if self.showfunc is None: + self.showfunc = self.ui.configbool('diff', 'showfunc') mapfile = None template = (self.ui.config('notify', hooktype) or @@ -420,8 +429,9 @@ class notifier(object): ref = ref.node() else: ref = ctx.node() - chunks = patch.diff(self.repo, prev, ref, - opts=patch.diffallopts(self.ui)) + diffopts = patch.diffallopts(self.ui) + diffopts.showfunc = self.showfunc + chunks = patch.diff(self.repo, prev, ref, opts=diffopts) difflines = ''.join(chunks).splitlines() if self.ui.configbool('notify', 'diffstat'): diff --git a/hgext/patchbomb.py b/hgext/patchbomb.py --- a/hgext/patchbomb.py +++ b/hgext/patchbomb.py @@ -75,11 +75,12 @@ from __future__ import absolute_import import email as emailmod import email.generator as emailgen +import email.mime.base as emimebase +import email.mime.multipart as emimemultipart import email.utils as eutil import errno import os import socket -import tempfile from mercurial.i18n import _ from mercurial import ( @@ -94,7 +95,6 @@ from mercurial import ( patch, pycompat, registrar, - repair, scmutil, templater, util, @@ -256,7 +256,7 @@ def makepatch(ui, repo, rev, patchlines, body += '\n'.join(patchlines) if addattachment: - msg = emailmod.MIMEMultipart.MIMEMultipart() + msg = emimemultipart.MIMEMultipart() if body: msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test'))) p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch', @@ -318,7 +318,7 @@ def _getbundle(repo, dest, **opts): The bundle is a returned as a single in-memory binary blob. 
""" ui = repo.ui - tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-') + tmpdir = pycompat.mkdtemp(prefix='hg-email-bundle-') tmpfn = os.path.join(tmpdir, 'bundle') btype = ui.config('patchbomb', 'bundletype') if btype: @@ -367,10 +367,10 @@ def _getbundlemsgs(repo, sender, bundle, or prompt(ui, 'Subject:', 'A bundle for your repository')) body = _getdescription(repo, '', sender, **opts) - msg = emailmod.MIMEMultipart.MIMEMultipart() + msg = emimemultipart.MIMEMultipart() if body: msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(r'test'))) - datapart = emailmod.MIMEBase.MIMEBase('application', 'x-mercurial-bundle') + datapart = emimebase.MIMEBase('application', 'x-mercurial-bundle') datapart.set_payload(bundle) bundlename = '%s.hg' % opts.get(r'bundlename', 'bundle') datapart.add_header('Content-Disposition', 'attachment', @@ -624,7 +624,7 @@ def email(ui, repo, *revs, **opts): elif bookmark: if bookmark not in repo._bookmarks: raise error.Abort(_("bookmark '%s' not found") % bookmark) - revs = repair.stripbmrevset(repo, bookmark) + revs = scmutil.bookmarkrevs(repo, bookmark) revs = scmutil.revrange(repo, revs) if outgoing: @@ -753,6 +753,7 @@ def email(ui, repo, *revs, **opts): sender = mail.addressencode(ui, sender, _charsets, opts.get('test')) sendmail = None firstpatch = None + progress = ui.makeprogress(_('sending'), unit=_('emails'), total=len(msgs)) for i, (m, subj, ds) in enumerate(msgs): try: m['Message-Id'] = genmsgid(m['X-Mercurial-Node']) @@ -793,8 +794,7 @@ def email(ui, repo, *revs, **opts): if not sendmail: sendmail = mail.connect(ui, mbox=mbox) ui.status(_('sending '), subj, ' ...\n') - ui.progress(_('sending'), i, item=subj, total=len(msgs), - unit=_('emails')) + progress.update(i, item=subj) if not mbox: # Exim does not remove the Bcc field del m['Bcc'] @@ -803,5 +803,4 @@ def email(ui, repo, *revs, **opts): generator.flatten(m, 0) sendmail(sender_addr, to + bcc + cc, fp.getvalue()) - ui.progress(_('writing'), None) - ui.progress(_('sending'), None) + progress.complete() diff --git a/hgext/rebase.py b/hgext/rebase.py --- a/hgext/rebase.py +++ b/hgext/rebase.py @@ -34,7 +34,6 @@ from mercurial import ( error, extensions, hg, - lock, merge as mergemod, mergeutil, obsolete, @@ -48,11 +47,10 @@ from mercurial import ( revsetlang, scmutil, smartset, + state as statemod, util, ) -release = lock.release - # The following constants are used throughout the rebase module. The ordering of # their values must be maintained. 
@@ -184,6 +182,7 @@ class rebaseruntime(object): self.obsoletenotrebased = {} self.obsoletewithoutsuccessorindestination = set() self.inmemory = inmemory + self.stateobj = statemod.cmdstate(repo, 'rebasestate') @property def repo(self): @@ -225,40 +224,55 @@ class rebaseruntime(object): def restorestatus(self): """Restore a previously stored status""" + if not self.stateobj.exists(): + cmdutil.wrongtooltocontinue(self.repo, _('rebase')) + + data = self._read() + self.repo.ui.debug('rebase status resumed\n') + + self.originalwd = data['originalwd'] + self.destmap = data['destmap'] + self.state = data['state'] + self.skipped = data['skipped'] + self.collapsef = data['collapse'] + self.keepf = data['keep'] + self.keepbranchesf = data['keepbranches'] + self.external = data['external'] + self.activebookmark = data['activebookmark'] + + def _read(self): self.prepared = True repo = self.repo assert repo.filtername is None - keepbranches = None + data = {'keepbranches': None, 'collapse': None, 'activebookmark': None, + 'external': nullrev, 'keep': None, 'originalwd': None} legacydest = None - collapse = False - external = nullrev - activebookmark = None state = {} destmap = {} - try: + if True: f = repo.vfs("rebasestate") for i, l in enumerate(f.read().splitlines()): if i == 0: - originalwd = repo[l].rev() + data['originalwd'] = repo[l].rev() elif i == 1: # this line should be empty in newer version. but legacy # clients may still use it if l: legacydest = repo[l].rev() elif i == 2: - external = repo[l].rev() + data['external'] = repo[l].rev() elif i == 3: - collapse = bool(int(l)) + data['collapse'] = bool(int(l)) elif i == 4: - keep = bool(int(l)) + data['keep'] = bool(int(l)) elif i == 5: - keepbranches = bool(int(l)) + data['keepbranches'] = bool(int(l)) elif i == 6 and not (len(l) == 81 and ':' in l): # line 6 is a recent addition, so for backwards # compatibility check that the line doesn't look like the # oldrev:newrev lines - activebookmark = l + data['activebookmark'] = l else: args = l.split(':') oldrev = repo[args[0]].rev() @@ -276,35 +290,24 @@ class rebaseruntime(object): else: state[oldrev] = repo[newrev].rev() - except IOError as err: - if err.errno != errno.ENOENT: - raise - cmdutil.wrongtooltocontinue(repo, _('rebase')) - - if keepbranches is None: + if data['keepbranches'] is None: raise error.Abort(_('.hg/rebasestate is incomplete')) + data['destmap'] = destmap + data['state'] = state skipped = set() # recompute the set of skipped revs - if not collapse: + if not data['collapse']: seen = set(destmap.values()) for old, new in sorted(state.items()): if new != revtodo and new in seen: skipped.add(old) seen.add(new) + data['skipped'] = skipped repo.ui.debug('computed skipped revs: %s\n' % (' '.join('%d' % r for r in sorted(skipped)) or '')) - repo.ui.debug('rebase status resumed\n') - self.originalwd = originalwd - self.destmap = destmap - self.state = state - self.skipped = skipped - self.collapsef = collapse - self.keepf = keep - self.keepbranchesf = keepbranches - self.external = external - self.activebookmark = activebookmark + return data def _handleskippingobsolete(self, obsoleterevs, destmap): """Compute structures necessary for skipping obsolete revisions @@ -325,7 +328,7 @@ class rebaseruntime(object): skippedset.update(obsoleteextinctsuccessors) _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset) - def _prepareabortorcontinue(self, isabort): + def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False): try: self.restorestatus() self.collapsemsg = 
restorecollapsemsg(self.repo, isabort) @@ -341,8 +344,9 @@ class rebaseruntime(object): hint = _('use "hg rebase --abort" to clear broken state') raise error.Abort(msg, hint=hint) if isabort: - return abort(self.repo, self.originalwd, self.destmap, - self.state, activebookmark=self.activebookmark) + return abort(self.repo, self.originalwd, self.destmap, self.state, + activebookmark=self.activebookmark, backup=backup, + suppwarns=suppwarns) def _preparenewrebase(self, destmap): if not destmap: @@ -433,13 +437,10 @@ class rebaseruntime(object): self.storestatus(tr) cands = [k for k, v in self.state.iteritems() if v == revtodo] - total = len(cands) - posholder = [0] + p = repo.ui.makeprogress(_("rebasing"), unit=_('changesets'), + total=len(cands)) def progress(ctx): - posholder[0] += 1 - self.repo.ui.progress(_("rebasing"), posholder[0], - ("%d:%s" % (ctx.rev(), ctx)), - _('changesets'), total) + p.increment(item=("%d:%s" % (ctx.rev(), ctx))) allowdivergence = self.ui.configbool( 'experimental', 'evolution.allowdivergence') for subset in sortsource(self.destmap): @@ -452,7 +453,7 @@ class rebaseruntime(object): ) for rev in sortedrevs: self._rebasenode(tr, rev, allowdivergence, progress) - ui.progress(_('rebasing'), None) + p.complete() ui.note(_('rebase merging completed\n')) def _concludenode(self, rev, p1, p2, editor, commitmsg=None): @@ -625,7 +626,7 @@ class rebaseruntime(object): newwd = self.originalwd if newwd not in [c.rev() for c in repo[None].parents()]: ui.note(_("update back to initial working directory parent\n")) - hg.updaterepo(repo, newwd, False) + hg.updaterepo(repo, newwd, overwrite=False) collapsedas = None if self.collapsef and not self.keepf: @@ -673,8 +674,7 @@ class rebaseruntime(object): ('a', 'abort', False, _('abort an interrupted rebase')), ('', 'auto-orphans', '', _('automatically rebase orphan revisions ' 'in the specified revset (EXPERIMENTAL)')), - ] + - cmdutil.formatteropts, + ] + cmdutil.dryrunopts + cmdutil.formatteropts + cmdutil.confirmopts, _('[-s REV | -b REV] [-d REV] [OPTION]')) def rebase(ui, repo, **opts): """move changeset (and descendants) to a different branch @@ -797,7 +797,23 @@ def rebase(ui, repo, **opts): unresolved conflicts. """ + opts = pycompat.byteskwargs(opts) inmemory = ui.configbool('rebase', 'experimental.inmemory') + dryrun = opts.get('dry_run') + if dryrun: + if opts.get('abort'): + raise error.Abort(_('cannot specify both --dry-run and --abort')) + if opts.get('continue'): + raise error.Abort(_('cannot specify both --dry-run and --continue')) + if opts.get('confirm'): + dryrun = True + if opts.get('dry_run'): + raise error.Abort(_('cannot specify both --confirm and --dry-run')) + if opts.get('abort'): + raise error.Abort(_('cannot specify both --confirm and --abort')) + if opts.get('continue'): + raise error.Abort(_('cannot specify both --confirm and --continue')) + if (opts.get('continue') or opts.get('abort') or repo.currenttransaction() is not None): # in-memory rebase is not compatible with resuming rebases. @@ -814,25 +830,67 @@ def rebase(ui, repo, **opts): opts['rev'] = [revsetlang.formatspec('%ld and orphan()', userrevs)] opts['dest'] = '_destautoorphanrebase(SRC)' - if inmemory: + if dryrun: + return _dryrunrebase(ui, repo, opts) + elif inmemory: try: # in-memory merge doesn't support conflicts, so if we hit any, abort # and re-run as an on-disk merge. 
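# (The configoverride pattern used just below, assuming a `ui` object:
# values keyed by (section, name) apply only for the duration of the
# with-block and are restored afterwards:
#
#     overrides = {('rebase', 'singletransaction'): True}
#     with ui.configoverride(overrides, 'rebase'):
#         ...  # rebase runs with a single transaction here
# )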
overrides = {('rebase', 'singletransaction'): True} with ui.configoverride(overrides, 'rebase'): - return _origrebase(ui, repo, inmemory=inmemory, **opts) + return _dorebase(ui, repo, opts, inmemory=inmemory) except error.InMemoryMergeConflictsError: ui.warn(_('hit merge conflicts; re-running rebase without in-memory' ' merge\n')) - _origrebase(ui, repo, **{'abort': True}) - return _origrebase(ui, repo, inmemory=False, **opts) + _dorebase(ui, repo, {'abort': True}) + return _dorebase(ui, repo, opts, inmemory=False) else: - return _origrebase(ui, repo, **opts) + return _dorebase(ui, repo, opts) -def _origrebase(ui, repo, inmemory=False, **opts): - opts = pycompat.byteskwargs(opts) +def _dryrunrebase(ui, repo, opts): + rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts) + confirm = opts.get('confirm') + if confirm: + ui.status(_('starting in-memory rebase\n')) + else: + ui.status(_('starting dry-run rebase; repository will not be ' + 'changed\n')) + with repo.wlock(), repo.lock(): + needsabort = True + try: + overrides = {('rebase', 'singletransaction'): True} + with ui.configoverride(overrides, 'rebase'): + _origrebase(ui, repo, opts, rbsrt, inmemory=True, + leaveunfinished=True) + except error.InMemoryMergeConflictsError: + ui.status(_('hit a merge conflict\n')) + return 1 + else: + if confirm: + ui.status(_('rebase completed successfully\n')) + if not ui.promptchoice(_(b'apply changes (yn)?' + b'$$ &Yes $$ &No')): + # finish unfinished rebase + rbsrt._finishrebase() + else: + rbsrt._prepareabortorcontinue(isabort=True, backup=False, + suppwarns=True) + needsabort = False + else: + ui.status(_('dry-run rebase completed successfully; run without' + ' -n/--dry-run to perform this rebase\n')) + return 0 + finally: + if needsabort: + # no need to store backup in case of dryrun + rbsrt._prepareabortorcontinue(isabort=True, backup=False, + suppwarns=True) + +def _dorebase(ui, repo, opts, inmemory=False): rbsrt = rebaseruntime(repo, ui, inmemory, opts) + return _origrebase(ui, repo, opts, rbsrt, inmemory=inmemory) +def _origrebase(ui, repo, opts, rbsrt, inmemory=False, leaveunfinished=False): with repo.wlock(), repo.lock(): # Validate input and define rebasing points destf = opts.get('dest', None) @@ -902,7 +960,8 @@ def _origrebase(ui, repo, inmemory=False dsguard = dirstateguard.dirstateguard(repo, 'rebase') with util.acceptintervention(dsguard): rbsrt._performrebase(tr) - rbsrt._finishrebase() + if not leaveunfinished: + rbsrt._finishrebase() def _definedestmap(ui, repo, inmemory, destf=None, srcf=None, basef=None, revf=None, destspace=None): @@ -1255,13 +1314,7 @@ def defineparents(repo, rev, destmap, st # use unfiltered changelog since successorrevs may return filtered nodes assert repo.filtername is None cl = repo.changelog - def isancestor(a, b): - # take revision numbers instead of nodes - if a == b: - return True - elif a > b: - return False - return cl.isancestor(cl.node(a), cl.node(b)) + isancestor = cl.isancestorrev dest = destmap[rev] oldps = repo.changelog.parentrevs(rev) # old parents @@ -1527,7 +1580,8 @@ def needupdate(repo, state): return False -def abort(repo, originalwd, destmap, state, activebookmark=None): +def abort(repo, originalwd, destmap, state, activebookmark=None, backup=True, + suppwarns=False): '''Restore the repository to its original state. 
Additional args: activebookmark: the name of the bookmark that should be active after the @@ -1572,8 +1626,7 @@ def abort(repo, originalwd, destmap, sta # Strip from the first rebased revision if rebased: - # no backup of rebased cset versions needed - repair.strip(repo.ui, repo, strippoints) + repair.strip(repo.ui, repo, strippoints, backup=backup) if activebookmark and activebookmark in repo._bookmarks: bookmarks.activate(repo, activebookmark) @@ -1581,7 +1634,8 @@ def abort(repo, originalwd, destmap, sta finally: clearstatus(repo) clearcollapsemsg(repo) - repo.ui.warn(_('rebase aborted\n')) + if not suppwarns: + repo.ui.warn(_('rebase aborted\n')) return 0 def sortsource(destmap): @@ -1790,33 +1844,31 @@ def _computeobsoletenotrebased(repo, reb assert repo.filtername is None cl = repo.changelog nodemap = cl.nodemap - extinctnodes = set(cl.node(r) for r in repo.revs('extinct()')) + extinctrevs = set(repo.revs('extinct()')) for srcrev in rebaseobsrevs: srcnode = cl.node(srcrev) - destnode = cl.node(destmap[srcrev]) # XXX: more advanced APIs are required to handle split correctly successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode])) # obsutil.allsuccessors includes node itself successors.remove(srcnode) - if successors.issubset(extinctnodes): + succrevs = {nodemap[s] for s in successors if s in nodemap} + if succrevs.issubset(extinctrevs): # all successors are extinct obsoleteextinctsuccessors.add(srcrev) if not successors: # no successor obsoletenotrebased[srcrev] = None else: - for succnode in successors: - if succnode not in nodemap: - continue - if cl.isancestor(succnode, destnode): - obsoletenotrebased[srcrev] = nodemap[succnode] + dstrev = destmap[srcrev] + for succrev in succrevs: + if cl.isancestorrev(succrev, dstrev): + obsoletenotrebased[srcrev] = succrev break else: # If 'srcrev' has a successor in rebase set but none in # destination (which would be catched above), we shall skip it # and its descendants to avoid divergence. - if any(nodemap[s] in destmap for s in successors - if s in nodemap): + if any(s in destmap for s in succrevs): obsoletewithoutsuccessorindestination.add(srcrev) return ( diff --git a/hgext/relink.py b/hgext/relink.py --- a/hgext/relink.py +++ b/hgext/relink.py @@ -70,17 +70,10 @@ def relink(ui, repo, origin=None, **opts # No point in continuing raise error.Abort(_('source and destination are on different devices')) - locallock = repo.lock() - try: - remotelock = src.lock() - try: - candidates = sorted(collect(src, ui)) - targets = prune(candidates, src.store.path, repo.store.path, ui) - do_relink(src.store.path, repo.store.path, targets, ui) - finally: - remotelock.release() - finally: - locallock.release() + with repo.lock(), src.lock(): + candidates = sorted(collect(src, ui)) + targets = prune(candidates, src.store.path, repo.store.path, ui) + do_relink(src.store.path, repo.store.path, targets, ui) def collect(src, ui): seplen = len(os.path.sep) @@ -94,6 +87,7 @@ def collect(src, ui): # mozilla-central as of 2010-06-10 had a ratio of just over 7:5. 
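# (Worked example of the estimate below: with live = 10000 files at
# tip, total = 10000 * 3 // 2 == 15000 estimated store files.)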
total = live * 3 // 2 src = src.store.path + progress = ui.makeprogress(_('collecting'), unit=_('files'), total=total) pos = 0 ui.status(_("tip has %d files, estimated total number of files: %d\n") % (live, total)) @@ -108,9 +102,9 @@ def collect(src, ui): continue pos += 1 candidates.append((os.path.join(relpath, filename), st)) - ui.progress(_('collecting'), pos, filename, _('files'), total) + progress.update(pos, item=filename) - ui.progress(_('collecting'), None) + progress.complete() ui.status(_('collected %d candidate storage files\n') % len(candidates)) return candidates @@ -132,7 +126,8 @@ def prune(candidates, src, dst, ui): return st targets = [] - total = len(candidates) + progress = ui.makeprogress(_('pruning'), unit=_('files'), + total=len(candidates)) pos = 0 for fn, st in candidates: pos += 1 @@ -143,9 +138,9 @@ def prune(candidates, src, dst, ui): ui.debug('not linkable: %s\n' % fn) continue targets.append((fn, ts.st_size)) - ui.progress(_('pruning'), pos, fn, _('files'), total) + progress.update(pos, item=fn) - ui.progress(_('pruning'), None) + progress.complete() ui.status(_('pruned down to %d probably relinkable files\n') % len(targets)) return targets @@ -164,8 +159,9 @@ def do_relink(src, dst, files, ui): relinked = 0 savedbytes = 0 + progress = ui.makeprogress(_('relinking'), unit=_('files'), + total=len(files)) pos = 0 - total = len(files) for f, sz in files: pos += 1 source = os.path.join(src, f) @@ -186,13 +182,13 @@ def do_relink(src, dst, files, ui): continue try: relinkfile(source, tgt) - ui.progress(_('relinking'), pos, f, _('files'), total) + progress.update(pos, item=f) relinked += 1 savedbytes += sz except OSError as inst: ui.warn('%s: %s\n' % (tgt, stringutil.forcebytestr(inst))) - ui.progress(_('relinking'), None) + progress.complete() ui.status(_('relinked %d files (%s reclaimed)\n') % (relinked, util.bytecount(savedbytes))) diff --git a/hgext/remotenames.py b/hgext/remotenames.py --- a/hgext/remotenames.py +++ b/hgext/remotenames.py @@ -249,6 +249,10 @@ def extsetup(ui): extensions.wrapfunction(bookmarks, '_printbookmarks', wrapprintbookmarks) def reposetup(ui, repo): + + # set the config option to store remotenames + repo.ui.setconfig('experimental', 'remotenames', True, 'remotenames-ext') + if not repo.local(): return diff --git a/hgext/schemes.py b/hgext/schemes.py --- a/hgext/schemes.py +++ b/hgext/schemes.py @@ -114,7 +114,7 @@ schemes = { def extsetup(ui): schemes.update(dict(ui.configitems('schemes'))) - t = templater.engine(lambda x: x) + t = templater.engine(templater.parse) for scheme, url in schemes.items(): if (pycompat.iswindows and len(scheme) == 1 and scheme.isalpha() and os.path.exists('%s:\\' % scheme)): diff --git a/hgext/shelve.py b/hgext/shelve.py --- a/hgext/shelve.py +++ b/hgext/shelve.py @@ -594,10 +594,15 @@ def listcmd(ui, repo, pats, opts): for chunk, label in patch.diffstatui(difflines, width=width): ui.write(chunk, label=label) -def patchcmds(ui, repo, pats, opts, subcommand): +def patchcmds(ui, repo, pats, opts): """subcommand that displays shelves""" if len(pats) == 0: - raise error.Abort(_("--%s expects at least one shelf") % subcommand) + shelves = listshelves(repo) + if not shelves: + raise error.Abort(_("there are no shelves to show")) + mtime, name = shelves[0] + sname = util.split(name)[1] + pats = [sname] for shelfname in pats: if not shelvedfile(repo, shelfname, patchextension).exists(): @@ -621,14 +626,14 @@ def unshelveabort(ui, repo, state, opts) try: checkparents(repo, state) - 
repo.vfs.rename('unshelverebasestate', 'rebasestate') - try: - rebase.rebase(ui, repo, **{ - r'abort' : True - }) - except Exception: - repo.vfs.rename('rebasestate', 'unshelverebasestate') - raise + merge.update(repo, state.pendingctx, False, True) + if (state.activebookmark + and state.activebookmark in repo._bookmarks): + bookmarks.activate(repo, state.activebookmark) + + if repo.vfs.exists('unshelverebasestate'): + repo.vfs.rename('unshelverebasestate', 'rebasestate') + rebase.clearstatus(repo) mergefiles(ui, repo, state.wctx, state.pendingctx) repair.strip(ui, repo, state.nodestoremove, backup=False, @@ -683,22 +688,41 @@ def unshelvecontinue(ui, repo, state, op _("unresolved conflicts, can't continue"), hint=_("see 'hg resolve', then 'hg unshelve --continue'")) - repo.vfs.rename('unshelverebasestate', 'rebasestate') - try: - rebase.rebase(ui, repo, **{ - r'continue' : True - }) - except Exception: - repo.vfs.rename('rebasestate', 'unshelverebasestate') - raise + shelvectx = repo[state.parents[1]] + pendingctx = state.pendingctx + + with repo.dirstate.parentchange(): + repo.setparents(state.pendingctx.node(), nodemod.nullid) + repo.dirstate.write(repo.currenttransaction()) + + overrides = {('phases', 'new-commit'): phases.secret} + with repo.ui.configoverride(overrides, 'unshelve'): + with repo.dirstate.parentchange(): + repo.setparents(state.parents[0], nodemod.nullid) + newnode = repo.commit(text=shelvectx.description(), + extra=shelvectx.extra(), + user=shelvectx.user(), + date=shelvectx.date()) - shelvectx = repo['tip'] - if state.pendingctx not in shelvectx.parents(): - # rebase was a no-op, so it produced no child commit + if newnode is None: + # If it ended up being a no-op commit, then the normal + # merge state clean-up path doesn't happen, so do it + # here. 
Fix issue5494 + merge.mergestate.clean(repo) shelvectx = state.pendingctx + msg = _('note: unshelved changes already existed ' + 'in the working copy\n') + ui.status(msg) else: - # only strip the shelvectx if the rebase produced it - state.nodestoremove.append(shelvectx.node()) + # only strip the shelvectx if we produced one + state.nodestoremove.append(newnode) + shelvectx = repo[newnode] + + hg.updaterepo(repo, pendingctx.node(), overwrite=False) + + if repo.vfs.exists('unshelverebasestate'): + repo.vfs.rename('unshelverebasestate', 'rebasestate') + rebase.clearstatus(repo) mergefiles(ui, repo, state.wctx, shelvectx) restorebranch(ui, repo, state.branchtorestore) @@ -746,33 +770,46 @@ def _rebaserestoredcommit(ui, repo, opts if tmpwctx.node() == shelvectx.parents()[0].node(): return shelvectx - ui.status(_('rebasing shelved changes\n')) - try: - rebase.rebase(ui, repo, **{ - r'rev': [shelvectx.rev()], - r'dest': "%d" % tmpwctx.rev(), - r'keep': True, - r'tool': opts.get('tool', ''), - }) - except error.InterventionRequired: - tr.close() + overrides = { + ('ui', 'forcemerge'): opts.get('tool', ''), + ('phases', 'new-commit'): phases.secret, + } + with repo.ui.configoverride(overrides, 'unshelve'): + ui.status(_('rebasing shelved changes\n')) + stats = merge.graft(repo, shelvectx, shelvectx.p1(), + labels=['shelve', 'working-copy'], + keepconflictparent=True) + if stats.unresolvedcount: + tr.close() + + nodestoremove = [repo.changelog.node(rev) + for rev in xrange(oldtiprev, len(repo))] + shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove, + branchtorestore, opts.get('keep'), activebookmark) + raise error.InterventionRequired( + _("unresolved conflicts (see 'hg resolve', then " + "'hg unshelve --continue')")) - nodestoremove = [repo.changelog.node(rev) - for rev in xrange(oldtiprev, len(repo))] - shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove, - branchtorestore, opts.get('keep'), activebookmark) + with repo.dirstate.parentchange(): + repo.setparents(tmpwctx.node(), nodemod.nullid) + newnode = repo.commit(text=shelvectx.description(), + extra=shelvectx.extra(), + user=shelvectx.user(), + date=shelvectx.date()) - repo.vfs.rename('rebasestate', 'unshelverebasestate') - raise error.InterventionRequired( - _("unresolved conflicts (see 'hg resolve', then " - "'hg unshelve --continue')")) + if newnode is None: + # If it ended up being a no-op commit, then the normal + # merge state clean-up path doesn't happen, so do it + # here. Fix issue5494 + merge.mergestate.clean(repo) + shelvectx = tmpwctx + msg = _('note: unshelved changes already existed ' + 'in the working copy\n') + ui.status(msg) + else: + shelvectx = repo[newnode] + hg.updaterepo(repo, tmpwctx.node(), False) - # refresh ctx after rebase completes - shelvectx = repo['tip'] - - if tmpwctx not in shelvectx.parents(): - # rebase was a no-op, so it produced no child commit - shelvectx = tmpwctx return shelvectx def _forgetunknownfiles(repo, shelvectx, addedbefore): @@ -933,27 +970,27 @@ def _dounshelve(ui, repo, *shelved, **op # to the original pctx. 
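# (Illustrative aside: the quiet-update idiom used throughout this
# series, given `repo` and a target node `n`:
#
#     hg.updaterepo(repo, n, overwrite=False)
#
# replaces the old pushbuffer()/hg.update()/popbuffer() sequence seen
# in the removed lines above.)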
activebookmark = _backupactivebookmark(repo) + tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts, + tmpwctx) + repo, shelvectx = _unshelverestorecommit(ui, repo, basename) + _checkunshelveuntrackedproblems(ui, repo, shelvectx) + branchtorestore = '' + if shelvectx.branch() != shelvectx.p1().branch(): + branchtorestore = shelvectx.branch() + + shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, + basename, pctx, tmpwctx, + shelvectx, branchtorestore, + activebookmark) overrides = {('ui', 'forcemerge'): opts.get('tool', '')} with ui.configoverride(overrides, 'unshelve'): - tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts, - tmpwctx) - repo, shelvectx = _unshelverestorecommit(ui, repo, basename) - _checkunshelveuntrackedproblems(ui, repo, shelvectx) - branchtorestore = '' - if shelvectx.branch() != shelvectx.p1().branch(): - branchtorestore = shelvectx.branch() + mergefiles(ui, repo, pctx, shelvectx) + restorebranch(ui, repo, branchtorestore) + _forgetunknownfiles(repo, shelvectx, addedbefore) - shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, - basename, pctx, tmpwctx, - shelvectx, branchtorestore, - activebookmark) - mergefiles(ui, repo, pctx, shelvectx) - restorebranch(ui, repo, branchtorestore) - _forgetunknownfiles(repo, shelvectx, addedbefore) - - shelvedstate.clear(repo) - _finishunshelve(repo, oldtiprev, tr, activebookmark) - unshelvecleanup(ui, repo, basename, opts) + shelvedstate.clear(repo) + _finishunshelve(repo, oldtiprev, tr, activebookmark) + unshelvecleanup(ui, repo, basename, opts) finally: if tr: tr.release() @@ -979,11 +1016,14 @@ def _dounshelve(ui, repo, *shelved, **op ('n', 'name', '', _('use the given name for the shelved commit'), _('NAME')), ('p', 'patch', None, - _('show patch')), + _('output patches for changes (provide the names of the shelved ' + 'changes as positional arguments)')), ('i', 'interactive', None, _('interactive mode, only works while creating a shelve')), ('', 'stat', None, - _('output diffstat-style summary of changes'))] + cmdutil.walkopts, + _('output diffstat-style summary of changes (provide the names of ' + 'the shelved changes as positional arguments)') + )] + cmdutil.walkopts, _('hg shelve [OPTION]... 
diff --git a/hgext/sparse.py b/hgext/sparse.py
--- a/hgext/sparse.py
+++ b/hgext/sparse.py
@@ -138,9 +138,9 @@ def _setuplog(ui):
     extensions.wrapfunction(logcmdutil, '_initialrevs', _initialrevs)
 
 def _clonesparsecmd(orig, ui, repo, *args, **opts):
-    include_pat = opts.get('include')
-    exclude_pat = opts.get('exclude')
-    enableprofile_pat = opts.get('enable_profile')
+    include_pat = opts.get(r'include')
+    exclude_pat = opts.get(r'exclude')
+    enableprofile_pat = opts.get(r'enable_profile')
     include = exclude = enableprofile = False
     if include_pat:
         pat = include_pat
@@ -178,7 +178,7 @@ def _setupadd(ui):
               'also include directories of added files in sparse config'))
 
     def _add(orig, ui, repo, *pats, **opts):
-        if opts.get('sparse'):
+        if opts.get(r'sparse'):
             dirs = set()
             for pat in pats:
                 dirname, basename = util.split(pat)
diff --git a/hgext/split.py b/hgext/split.py
--- a/hgext/split.py
+++ b/hgext/split.py
@@ -60,6 +60,7 @@ def split(ui, repo, *revs, **opts):
     By default, rebase connected non-obsoleted descendants onto the new
     changeset. Use --no-rebase to avoid the rebase.
     """
+    opts = pycompat.byteskwargs(opts)
     revlist = []
     if opts.get('rev'):
         revlist.append(opts.get('rev'))
@@ -169,7 +170,7 @@ def dosplit(ui, repo, tr, ctx, opts):
         raise error.Abort(_('cannot split an empty revision'))
 
     scmutil.cleanupnodes(repo, {ctx.node(): [c.node() for c in committed]},
-                         operation='split')
+                         operation='split', fixphase=True)
 
     return committed[-1]
diff --git a/hgext/strip.py b/hgext/strip.py
--- a/hgext/strip.py
+++ b/hgext/strip.py
@@ -103,8 +103,9 @@ def strip(ui, repo, revs, update=True, b
            'option)'), _('REV')),
           ('f', 'force', None, _('force removal of changesets, discard '
                                  'uncommitted changes (no backup)')),
-          ('', 'no-backup', None, _('no backups')),
-          ('', 'nobackup', None, _('no backups (DEPRECATED)')),
+          ('', 'no-backup', None, _('do not save backup bundle')),
+          ('', 'nobackup', None, _('do not save backup bundle '
+                                   '(DEPRECATED)')),
           ('n', '', None, _('ignored (DEPRECATED)')),
           ('k', 'keep', None, _("do not modify working directory during "
                                 "strip")),
@@ -165,7 +166,7 @@ def stripcmd(ui, repo, *revs, **opts):
                 nodetobookmarks.setdefault(node, []).append(mark)
             for marks in nodetobookmarks.values():
                 if bookmarks.issuperset(marks):
-                    rsrevs = repair.stripbmrevset(repo, marks[0])
+                    rsrevs = scmutil.bookmarkrevs(repo, marks[0])
                     revs.update(set(rsrevs))
             if not revs:
                 with repo.lock(), repo.transaction('bookmark') as tr:
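
The sparse.py hunks switch **opts lookups to r'' (native str) keys, while
split.py converts the whole dict once; both are ways of coping with Python 3
keyword arguments arriving with str keys in a bytes-oriented codebase. A small
sketch contrasting the two idioms, assuming Mercurial-style command functions
(names illustrative):

    from mercurial import pycompat

    def mycommand(ui, repo, *pats, **opts):
        # idiom 1: convert once, then use the usual (bytes) keys everywhere
        opts = pycompat.byteskwargs(opts)
        rev = opts.get('rev')

    def mywrapper(orig, ui, repo, *args, **opts):
        # idiom 2: leave opts alone and look up with native-str (r'') keys,
        # as _clonesparsecmd() and _add() do above
        include = opts.get(r'include')
        return orig(ui, repo, *args, **opts)
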
diff --git a/hgext/transplant.py b/hgext/transplant.py
--- a/hgext/transplant.py
+++ b/hgext/transplant.py
@@ -16,7 +16,7 @@ map from a changeset hash to its hash in
 from __future__ import absolute_import
 
 import os
-import tempfile
+
 from mercurial.i18n import _
 from mercurial import (
     bundlerepo,
@@ -215,7 +215,7 @@ class transplanter(object):
         if skipmerge:
             patchfile = None
         else:
-            fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
+            fd, patchfile = pycompat.mkstemp(prefix='hg-transplant-')
             fp = os.fdopen(fd, r'wb')
             gen = patch.diff(source, parent, node, opts=diffopts)
             for chunk in gen:
@@ -263,7 +263,7 @@ class transplanter(object):
             self.ui.status(_('filtering %s\n') % patchfile)
             user, date, msg = (changelog[1], changelog[2],
                                changelog[4])
-            fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
+            fd, headerfile = pycompat.mkstemp(prefix='hg-transplant-')
             fp = os.fdopen(fd, r'wb')
             fp.write("# HG changeset patch\n")
             fp.write("# User %s\n" % user)
@@ -523,7 +523,8 @@ def browserevs(ui, repo, nodes, opts):
             displayer.show(repo[node])
             action = None
             while not action:
-                action = 'ynmpcq?'[ui.promptchoice(prompt)]
+                choice = ui.promptchoice(prompt)
+                action = 'ynmpcq?'[choice:choice + 1]
             if action == '?':
                 for c, t in ui.extractchoices(prompt)[1]:
                     ui.write('%s: %s\n' % (c, t))
@@ -682,7 +683,7 @@ def _dotransplant(ui, repo, *revs, **opt
     sourcerepo = opts.get('source')
     if sourcerepo:
         peer = hg.peer(repo, opts, ui.expandpath(sourcerepo))
-        heads = map(peer.lookup, opts.get('branch', ()))
+        heads = pycompat.maplist(peer.lookup, opts.get('branch', ()))
         target = set(heads)
         for r in revs:
             try:
@@ -693,7 +694,7 @@ def _dotransplant(ui, repo, *revs, **opt
                               onlyheads=sorted(target), force=True)
     else:
         source = repo
-        heads = map(source.lookup, opts.get('branch', ()))
+        heads = pycompat.maplist(source.lookup, opts.get('branch', ()))
         cleanupfn = None
 
     try:
@@ -708,7 +709,7 @@ def _dotransplant(ui, repo, *revs, **opt
             matchfn = lambda x: tf(x) and x not in prune
         else:
             matchfn = tf
-        merges = map(source.lookup, opts.get('merge', ()))
+        merges = pycompat.maplist(source.lookup, opts.get('merge', ()))
         revmap = {}
         if revs:
             for r in scmutil.revrange(source, revs):
diff --git a/hgext/uncommit.py b/hgext/uncommit.py
--- a/hgext/uncommit.py
+++ b/hgext/uncommit.py
@@ -91,12 +91,7 @@ def _commitfiltered(repo, ctx, match, ke
                               user=ctx.user(),
                               date=ctx.date(),
                               extra=ctx.extra())
-    # phase handling
-    commitphase = ctx.phase()
-    overrides = {('phases', 'new-commit'): commitphase}
-    with repo.ui.configoverride(overrides, 'uncommit'):
-        newid = repo.commitctx(new)
-    return newid
+    return repo.commitctx(new)
 
 def _fixdirstate(repo, oldctx, newctx, status):
     """ fix the dirstate after switching the working directory from oldctx to
@@ -183,7 +178,7 @@ def uncommit(ui, repo, *pats, **opts):
                 # Fully removed the old commit
                 mapping[old.node()] = ()
 
-            scmutil.cleanupnodes(repo, mapping, 'uncommit')
+            scmutil.cleanupnodes(repo, mapping, 'uncommit', fixphase=True)
 
             with repo.dirstate.parentchange():
                 repo.dirstate.setparents(newid, node.nullid)
@@ -242,12 +237,7 @@ def unamend(ui, repo, **opts):
                              user=predctx.user(),
                              date=predctx.date(),
                              extra=extras)
-    # phase handling
-    commitphase = curctx.phase()
-    overrides = {('phases', 'new-commit'): commitphase}
-    with repo.ui.configoverride(overrides, 'uncommit'):
-        newprednode = repo.commitctx(newctx)
-
+    newprednode = repo.commitctx(newctx)
     newpredctx = repo[newprednode]
     dirstate = repo.dirstate
 
@@ -257,4 +247,4 @@ def unamend(ui, repo, **opts):
     _fixdirstate(repo, curctx, newpredctx, s)
 
     mapping = {curctx.node(): (newprednode,)}
-    scmutil.cleanupnodes(repo, mapping, 'unamend')
+    scmutil.cleanupnodes(repo, mapping, 'unamend', fixphase=True)
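
The browserevs() change above is a Python 3 portability fix: indexing a bytes
string yields an int on Python 3, while slicing yields a one-byte bytes object
on both major versions, so the prompt choice is sliced instead of indexed.
A plain illustration (values shown for Python 3):

    s = b'ynmpcq?'
    choice = 2
    s[choice]              # 'm' on Python 2, but 109 (an int) on Python 3
    s[choice:choice + 1]   # b'm' on both, hence the slice in the hunk above
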
diff --git a/hgext/win32mbcs.py b/hgext/win32mbcs.py
--- a/hgext/win32mbcs.py
+++ b/hgext/win32mbcs.py
@@ -90,7 +90,7 @@ def decode(arg):
     return arg
 
 def encode(arg):
-    if isinstance(arg, unicode):
+    if isinstance(arg, pycompat.unicode):
         return arg.encode(_encoding)
     elif isinstance(arg, tuple):
         return tuple(map(encode, arg))
@@ -127,7 +127,7 @@ def basewrapper(func, argtype, enc, dec,
                        " %s encoding\n") % (_encoding))
 
 def wrapper(func, args, kwds):
-    return basewrapper(func, unicode, encode, decode, args, kwds)
+    return basewrapper(func, pycompat.unicode, encode, decode, args, kwds)
 
 def reversewrapper(func, args, kwds):
diff --git a/i18n/da.po b/i18n/da.po
--- a/i18n/da.po
+++ b/i18n/da.po
@@ -13696,7 +13696,7 @@ msgid ""
 msgstr ""
 
 msgid ""
-"``allow_archive``\n"
+"``allow-archive``\n"
 "    List of archive format (bz2, gz, zip) allowed for downloading.\n"
 "    Default is empty."
 msgstr ""
diff --git a/i18n/de.po b/i18n/de.po
--- a/i18n/de.po
+++ b/i18n/de.po
@@ -17347,7 +17347,7 @@ msgid ""
 msgstr ""
 
 msgid ""
-"``allow_archive``\n"
+"``allow-archive``\n"
 "    List of archive format (bz2, gz, zip) allowed for downloading.\n"
 "    Default is empty."
 msgstr ""
diff --git a/i18n/ja.po b/i18n/ja.po
--- a/i18n/ja.po
+++ b/i18n/ja.po
@@ -27712,11 +27712,11 @@ msgstr ""
 "    サーバの待ちうけアドレス。 (デフォルト値: ホストの持つ全アドレス)"
 
 msgid ""
-"``allow_archive``\n"
+"``allow-archive``\n"
 "    List of archive format (bz2, gz, zip) allowed for downloading.\n"
 "    (default: empty)"
 msgstr ""
-"``allow_archive``\n"
+"``allow-archive``\n"
 "    利用可能なダウンロード向けのアーカイブ形式 (bz2, gz, zip) 一覧。\n"
 "    (デフォルト値: 空 = ダウンロード不可)"
diff --git a/i18n/pt_BR.po b/i18n/pt_BR.po
--- a/i18n/pt_BR.po
+++ b/i18n/pt_BR.po
@@ -28663,11 +28663,11 @@ msgstr ""
 "    (padrão: usa todos os endereços)"
 
 msgid ""
-"``allow_archive``\n"
+"``allow-archive``\n"
 "    List of archive format (bz2, gz, zip) allowed for downloading.\n"
 "    (default: empty)"
 msgstr ""
-"``allow_archive``\n"
+"``allow-archive``\n"
 "    Lista de formatos de pacote (bz2, gz, zip) permitidos para download.\n"
 "    (padrão: lista vazia)"
diff --git a/i18n/ro.po b/i18n/ro.po
--- a/i18n/ro.po
+++ b/i18n/ro.po
@@ -12099,7 +12099,7 @@ msgid ""
 msgstr ""
 
 msgid ""
-"``allow_archive``\n"
+"``allow-archive``\n"
 "    List of archive format (bz2, gz, zip) allowed for downloading.\n"
 "    Default is empty."
 msgstr ""
diff --git a/i18n/ru.po b/i18n/ru.po
--- a/i18n/ru.po
+++ b/i18n/ru.po
@@ -19776,11 +19776,11 @@ msgstr ""
 "    Адрес прослушиваемого интерфейса. По умолчанию все интерфейсы."
 
 msgid ""
-"``allow_archive``\n"
+"``allow-archive``\n"
 "    List of archive format (bz2, gz, zip) allowed for downloading.\n"
 "    Default is empty."
 msgstr ""
-"``allow_archive``\n"
+"``allow-archive``\n"
 "    Список форматов архивов (bz2, gz, zip), которые можно скачивать.\n"
 "    По умолчанию пуст."
diff --git a/i18n/sv.po b/i18n/sv.po
--- a/i18n/sv.po
+++ b/i18n/sv.po
@@ -15034,7 +15034,7 @@ msgid ""
 msgstr ""
 
 msgid ""
-"``allow_archive``\n"
+"``allow-archive``\n"
 "    List of archive format (bz2, gz, zip) allowed for downloading.\n"
 "    Default is empty."
 msgstr ""
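
The .po updates above track the hgweb documentation rename of ``allow_archive``
to ``allow-archive``. Assuming the renamed 4.7-era spelling (the old one should
keep working as an alias), a minimal hgrc sketch enabling archive downloads:

    [web]
    allow-archive = bz2, gz, zip
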
diff --git a/mercurial/ancestor.py b/mercurial/ancestor.py
--- a/mercurial/ancestor.py
+++ b/mercurial/ancestor.py
@@ -339,6 +339,10 @@ class lazyancestors(object):
         seen = self._containsseen
         if target in seen:
             return True
+        # Only integer target is valid, but some callers expect 'None in self'
+        # to be False. So we explicitly allow it.
+        if target is None:
+            return False
 
         parentrevs = self._parentrevs
         visit = self._containsvisit
diff --git a/mercurial/archival.py b/mercurial/archival.py
--- a/mercurial/archival.py
+++ b/mercurial/archival.py
@@ -322,13 +322,14 @@ def archive(repo, dest, node, kind, deco
         files.sort()
         scmutil.prefetchfiles(repo, [ctx.rev()],
                               scmutil.matchfiles(repo, files))
-        repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
-        for i, f in enumerate(files):
+        progress = scmutil.progress(repo.ui, _('archiving'), unit=_('files'),
+                                    total=total)
+        progress.update(0)
+        for f in files:
             ff = ctx.flags(f)
             write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, ctx[f].data)
-            repo.ui.progress(_('archiving'), i + 1, item=f,
-                             unit=_('files'), total=total)
-        repo.ui.progress(_('archiving'), None)
+            progress.increment(item=f)
+        progress.complete()
 
     if subrepos:
         for subpath in sorted(ctx.substate):
diff --git a/mercurial/bdiff.c b/mercurial/bdiff.c
--- a/mercurial/bdiff.c
+++ b/mercurial/bdiff.c
@@ -310,6 +310,7 @@ int bdiff_diff(struct bdiff_line *a, int
 	return count;
 }
 
+/* deallocate list of hunks; l may be NULL */
 void bdiff_freehunks(struct bdiff_hunk *l)
 {
 	struct bdiff_hunk *n;
diff --git a/mercurial/bitmanipulation.h b/mercurial/bitmanipulation.h
--- a/mercurial/bitmanipulation.h
+++ b/mercurial/bitmanipulation.h
@@ -9,7 +9,8 @@ static inline uint32_t getbe32(const cha
 {
 	const unsigned char *d = (const unsigned char *)c;
 
-	return ((d[0] << 24) | (d[1] << 16) | (d[2] << 8) | (d[3]));
+	return ((((uint32_t)d[0]) << 24) | (((uint32_t)d[1]) << 16) |
+	        (((uint32_t)d[2]) << 8) | (d[3]));
}
 
 static inline int16_t getbeint16(const char *c)
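
archival.py above is one of several call sites in this series moved from raw
ui.progress() calls to a progress-helper object with an explicit lifecycle
(update/increment/complete). A minimal sketch of the new pattern as used in
these hunks (topic, unit and the surrounding function are illustrative):

    def archiveall(ui, files, write):
        progress = ui.makeprogress('archiving', unit='files',
                                   total=len(files))
        progress.update(0)
        for f in files:
            write(f)
            progress.increment(item=f)  # or progress.update(i) with an index
        progress.complete()             # tears the progress bar down
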
diff --git a/mercurial/bookmarks.py b/mercurial/bookmarks.py
--- a/mercurial/bookmarks.py
+++ b/mercurial/bookmarks.py
@@ -43,7 +43,7 @@ def _getbkfile(repo):
     fp, pending = txnutil.trypending(repo.root, repo.vfs, 'bookmarks')
     return fp
 
-class bmstore(dict):
+class bmstore(object):
     """Storage for bookmarks.
 
     This object should do all bookmark-related reads and writes, so
@@ -58,13 +58,13 @@ class bmstore(dict):
     """
 
     def __init__(self, repo):
-        dict.__init__(self)
         self._repo = repo
+        self._refmap = refmap = {}  # refspec: node
+        self._nodemap = nodemap = {}  # node: sorted([refspec, ...])
         self._clean = True
         self._aclean = True
         nm = repo.changelog.nodemap
         tonode = bin # force local lookup
-        setitem = dict.__setitem__
         try:
             with _getbkfile(repo) as bkfile:
                 for line in bkfile:
@@ -76,7 +76,15 @@ class bmstore(dict):
                     node = tonode(sha)
                     if node in nm:
                         refspec = encoding.tolocal(refspec)
-                        setitem(self, refspec, node)
+                        refmap[refspec] = node
+                        nrefs = nodemap.get(node)
+                        if nrefs is None:
+                            nodemap[node] = [refspec]
+                        else:
+                            nrefs.append(refspec)
+                            if nrefs[-2] > refspec:
+                                # bookmarks weren't sorted before 4.5
+                                nrefs.sort()
         except (TypeError, ValueError):
             # TypeError:
             # - bin(...)
@@ -96,38 +104,78 @@ class bmstore(dict):
 
     @active.setter
     def active(self, mark):
-        if mark is not None and mark not in self:
+        if mark is not None and mark not in self._refmap:
             raise AssertionError('bookmark %s does not exist!'
                                  % mark)
         self._active = mark
         self._aclean = False
 
-    def __setitem__(self, *args, **kwargs):
-        raise error.ProgrammingError("use 'bookmarks.applychanges' instead")
+    def __len__(self):
+        return len(self._refmap)
+
+    def __iter__(self):
+        return iter(self._refmap)
+
+    def iteritems(self):
+        return self._refmap.iteritems()
+
+    def items(self):
+        return self._refmap.items()
 
-    def _set(self, key, value):
-        self._clean = False
-        return dict.__setitem__(self, key, value)
+    # TODO: maybe rename to allnames()?
+    def keys(self):
+        return self._refmap.keys()
+
+    # TODO: maybe rename to allnodes()? but nodes would have to be deduplicated
+    # could be self._nodemap.keys()
+    def values(self):
+        return self._refmap.values()
+
+    def __contains__(self, mark):
+        return mark in self._refmap
+
+    def __getitem__(self, mark):
+        return self._refmap[mark]
 
-    def __delitem__(self, key):
-        raise error.ProgrammingError("use 'bookmarks.applychanges' instead")
+    def get(self, mark, default=None):
+        return self._refmap.get(mark, default)
 
-    def _del(self, key):
+    def _set(self, mark, node):
         self._clean = False
-        return dict.__delitem__(self, key)
+        if mark in self._refmap:
+            self._del(mark)
+        self._refmap[mark] = node
+        nrefs = self._nodemap.get(node)
+        if nrefs is None:
+            self._nodemap[node] = [mark]
+        else:
+            nrefs.append(mark)
+            nrefs.sort()
 
-    def update(self, *others):
-        raise error.ProgrammingError("use 'bookmarks.applychanges' instead")
+    def _del(self, mark):
+        self._clean = False
+        node = self._refmap.pop(mark)
+        nrefs = self._nodemap[node]
+        if len(nrefs) == 1:
+            assert nrefs[0] == mark
+            del self._nodemap[node]
+        else:
+            nrefs.remove(mark)
+
+    def names(self, node):
+        """Return a sorted list of bookmarks pointing to the specified node"""
+        return self._nodemap.get(node, [])
 
     def changectx(self, mark):
-        return self._repo[self[mark]]
+        node = self._refmap[mark]
+        return self._repo[node]
 
     def applychanges(self, repo, tr, changes):
         """Apply a list of changes to bookmarks
         """
         bmchanges = tr.changes.get('bookmarks')
         for name, node in changes:
-            old = self.get(name)
+            old = self._refmap.get(name)
             if node is None:
                 self._del(name)
             else:
@@ -151,7 +199,7 @@ class bmstore(dict):
     def _writerepo(self, repo):
         """Factored out for extensibility"""
         rbm = repo._bookmarks
-        if rbm.active not in self:
+        if rbm.active not in self._refmap:
             rbm.active = None
             rbm._writeactive()
 
@@ -182,7 +230,7 @@ class bmstore(dict):
         self._aclean = True
 
     def _write(self, fp):
-        for name, node in sorted(self.iteritems()):
+        for name, node in sorted(self._refmap.iteritems()):
             fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
         self._clean = True
         self._repo.invalidatevolatilesets()
@@ -208,15 +256,15 @@ class bmstore(dict):
         If divergent bookmark are to be deleted, they will be returned as list.
         """
         cur = self._repo['.'].node()
-        if mark in self and not force:
+        if mark in self._refmap and not force:
             if target:
-                if self[mark] == target and target == cur:
+                if self._refmap[mark] == target and target == cur:
                     # re-activating a bookmark
                     return []
                 rev = self._repo[target].rev()
                 anc = self._repo.changelog.ancestors([rev])
                 bmctx = self.changectx(mark)
-                divs = [self[b] for b in self
+                divs = [self._refmap[b] for b in self._refmap
                         if b.split('@', 1)[0] == mark.split('@', 1)[0]]
 
                 # allow resolving a single divergent bookmark even if moving
@@ -765,7 +813,7 @@ def validdest(repo, old, new):
         return new.node() in obsutil.foreground(repo, [old.node()])
     else:
         # still an independent clause as it is lazier (and therefore faster)
-        return old.descendant(new)
+        return old.isancestorof(new)
 
 def checkformat(repo, mark):
     """return a valid version of a potential bookmark name
@@ -875,11 +923,14 @@ def _printbookmarks(ui, repo, bmarks, **
     """
     opts = pycompat.byteskwargs(opts)
     fm = ui.formatter('bookmarks', opts)
+    contexthint = fm.contexthint('bookmark rev node active')
     hexfn = fm.hexfunc
     if len(bmarks) == 0 and fm.isplain():
         ui.status(_("no bookmarks set\n"))
     for bmark, (n, prefix, label) in sorted(bmarks.iteritems()):
         fm.startitem()
+        if 'ctx' in contexthint:
+            fm.context(ctx=repo[n])
         if not ui.quiet:
             fm.plain(' %s ' % prefix, label=label)
         fm.write('bookmark', '%s', bmark, label=label)
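
The bmstore rewrite above drops dict inheritance in favor of two private maps
that _set()/_del() keep in sync, so finding the bookmarks on a node no longer
requires scanning every bookmark. A reduced sketch of the invariant (a toy
class for illustration, not the real bmstore):

    class namestore(object):
        def __init__(self):
            self._refmap = {}   # name: node
            self._nodemap = {}  # node: sorted list of names

        def _set(self, name, node):
            if name in self._refmap:
                self._del(name)
            self._refmap[name] = node
            self._nodemap.setdefault(node, []).append(name)
            self._nodemap[node].sort()

        def _del(self, name):
            node = self._refmap.pop(name)
            names = self._nodemap[node]
            names.remove(name)
            if not names:
                del self._nodemap[node]

        def names(self, node):
            # cheap reverse lookup; this is what pays for the bookkeeping
            return self._nodemap.get(node, [])
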
""" cur = self._repo['.'].node() - if mark in self and not force: + if mark in self._refmap and not force: if target: - if self[mark] == target and target == cur: + if self._refmap[mark] == target and target == cur: # re-activating a bookmark return [] rev = self._repo[target].rev() anc = self._repo.changelog.ancestors([rev]) bmctx = self.changectx(mark) - divs = [self[b] for b in self + divs = [self._refmap[b] for b in self._refmap if b.split('@', 1)[0] == mark.split('@', 1)[0]] # allow resolving a single divergent bookmark even if moving @@ -765,7 +813,7 @@ def validdest(repo, old, new): return new.node() in obsutil.foreground(repo, [old.node()]) else: # still an independent clause as it is lazier (and therefore faster) - return old.descendant(new) + return old.isancestorof(new) def checkformat(repo, mark): """return a valid version of a potential bookmark name @@ -875,11 +923,14 @@ def _printbookmarks(ui, repo, bmarks, ** """ opts = pycompat.byteskwargs(opts) fm = ui.formatter('bookmarks', opts) + contexthint = fm.contexthint('bookmark rev node active') hexfn = fm.hexfunc if len(bmarks) == 0 and fm.isplain(): ui.status(_("no bookmarks set\n")) for bmark, (n, prefix, label) in sorted(bmarks.iteritems()): fm.startitem() + if 'ctx' in contexthint: + fm.context(ctx=repo[n]) if not ui.quiet: fm.plain(' %s ' % prefix, label=label) fm.write('bookmark', '%s', bmark, label=label) diff --git a/mercurial/bundle2.py b/mercurial/bundle2.py --- a/mercurial/bundle2.py +++ b/mercurial/bundle2.py @@ -628,9 +628,10 @@ class bundle20(object): def addparam(self, name, value=None): """add a stream level parameter""" if not name: - raise ValueError(r'empty parameter name') + raise error.ProgrammingError(b'empty parameter name') if name[0:1] not in pycompat.bytestr(string.ascii_letters): - raise ValueError(r'non letter first character: %s' % name) + raise error.ProgrammingError(b'non letter first character: %s' + % name) self._params.append((name, value)) def addpart(self, part): @@ -1877,7 +1878,7 @@ def handleremotechangegroup(op, inpart): real_part.validate() except error.Abort as e: raise error.Abort(_('bundle at %s is corrupted:\n%s') % - (util.hidepassword(raw_url), str(e))) + (util.hidepassword(raw_url), bytes(e))) assert not inpart.read() @parthandler('reply:changegroup', ('return', 'in-reply-to')) diff --git a/mercurial/bundlerepo.py b/mercurial/bundlerepo.py --- a/mercurial/bundlerepo.py +++ b/mercurial/bundlerepo.py @@ -15,7 +15,6 @@ from __future__ import absolute_import import os import shutil -import tempfile from .i18n import _ from .node import nullid @@ -270,7 +269,7 @@ class bundlerepository(localrepo.localre try: localrepo.localrepository.__init__(self, ui, repopath) except error.RepoError: - self._tempparent = tempfile.mkdtemp() + self._tempparent = pycompat.mkdtemp() localrepo.instance(ui, self._tempparent, 1) localrepo.localrepository.__init__(self, ui, self._tempparent) self.ui.setconfig('phases', 'publish', False, 'bundlerepo') diff --git a/mercurial/cext/bdiff.c b/mercurial/cext/bdiff.c --- a/mercurial/cext/bdiff.c +++ b/mercurial/cext/bdiff.c @@ -157,9 +157,7 @@ cleanup: PyBuffer_Release(&bb); free(al); free(bl); - if (l.next) { - bdiff_freehunks(l.next); - } + bdiff_freehunks(l.next); return result; } diff --git a/mercurial/cext/parsers.c b/mercurial/cext/parsers.c --- a/mercurial/cext/parsers.c +++ b/mercurial/cext/parsers.c @@ -713,7 +713,7 @@ void dirs_module_init(PyObject *mod); void manifest_module_init(PyObject *mod); void revlog_module_init(PyObject *mod); -static const int 
diff --git a/mercurial/cext/pathencode.c b/mercurial/cext/pathencode.c
--- a/mercurial/cext/pathencode.c
+++ b/mercurial/cext/pathencode.c
@@ -474,7 +474,10 @@ static Py_ssize_t basicencode(char *dest
 	static const uint32_t twobytes[8] = {0, 0, 0x87fffffe};
 
 	static const uint32_t onebyte[8] = {
-	    1, 0x2bff3bfa, 0x68000001, 0x2fffffff,
+	    1,
+	    0x2bff3bfa,
+	    0x68000001,
+	    0x2fffffff,
 	};
 
 	Py_ssize_t destlen = 0;
@@ -655,16 +658,10 @@ static int sha1hash(char hash[20], const
 	PyObject *shaobj, *hashobj;
 
 	if (shafunc == NULL) {
-		PyObject *hashlib, *name = PyBytes_FromString("hashlib");
-
-		if (name == NULL)
-			return -1;
-
-		hashlib = PyImport_Import(name);
-		Py_DECREF(name);
-
+		PyObject *hashlib = PyImport_ImportModule("hashlib");
 		if (hashlib == NULL) {
-			PyErr_SetString(PyExc_ImportError, "hashlib");
+			PyErr_SetString(PyExc_ImportError,
+			                "pathencode failed to find hashlib");
 			return -1;
 		}
 		shafunc = PyObject_GetAttrString(hashlib, "sha1");
@@ -673,12 +670,12 @@ static int sha1hash(char hash[20], const
 		if (shafunc == NULL) {
 			PyErr_SetString(PyExc_AttributeError,
 			                "module 'hashlib' has no "
-			                "attribute 'sha1'");
+			                "attribute 'sha1' in pathencode");
 			return -1;
 		}
 	}
 
-	shaobj = PyObject_CallFunction(shafunc, "s#", str, len);
+	shaobj = PyObject_CallFunction(shafunc, PY23("s#", "y#"), str, len);
 
 	if (shaobj == NULL)
 		return -1;
diff --git a/mercurial/cext/revlog.c b/mercurial/cext/revlog.c
--- a/mercurial/cext/revlog.c
+++ b/mercurial/cext/revlog.c
@@ -248,6 +248,20 @@ static const char *index_node(indexObjec
 	return data ? data + 32 : NULL;
 }
 
+/*
+ * Return the 20-byte SHA of the node corresponding to the given rev. The
+ * rev is assumed to be existing. If not, an exception is set.
+ */
+static const char *index_node_existing(indexObject *self, Py_ssize_t pos)
+{
+	const char *node = index_node(self, pos);
+	if (node == NULL) {
+		PyErr_Format(PyExc_IndexError, "could not access rev %d",
+		             (int)pos);
+	}
+	return node;
+}
+
 static int nt_insert(indexObject *self, const char *node, int rev);
 
 static int node_check(PyObject *obj, char **node, Py_ssize_t *nodelen)
@@ -1052,10 +1066,12 @@ static int nt_insert(indexObject *self,
 			return 0;
 		}
 		if (v < 0) {
-			const char *oldnode = index_node(self, -(v + 1));
+			const char *oldnode = index_node_existing(self, -(v + 1));
 			int noff;
 
-			if (!oldnode || !memcmp(oldnode, node, 20)) {
+			if (oldnode == NULL)
+				return -1;
+			if (!memcmp(oldnode, node, 20)) {
 				n->children[k] = -rev - 1;
 				return 0;
 			}
@@ -1135,9 +1151,9 @@ static int index_find_node(indexObject *
 	 */
 	if (self->ntmisses++ < 4) {
 		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node(self, rev);
+			const char *n = index_node_existing(self, rev);
 			if (n == NULL)
-				return -2;
+				return -3;
 			if (memcmp(node, n, nodelen > 20 ? 20 : nodelen) == 0) {
 				if (nt_insert(self, n, rev) == -1)
 					return -3;
@@ -1146,11 +1162,9 @@ static int index_find_node(indexObject *
 		}
 	} else {
 		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node(self, rev);
-			if (n == NULL) {
-				self->ntrev = rev + 1;
-				return -2;
-			}
+			const char *n = index_node_existing(self, rev);
+			if (n == NULL)
+				return -3;
 			if (nt_insert(self, n, rev) == -1) {
 				self->ntrev = rev + 1;
 				return -3;
@@ -1216,27 +1230,84 @@ static PyObject *index_getitem(indexObje
 	return NULL;
 }
 
+/*
+ * Fully populate the radix tree.
+ */
+static int nt_populate(indexObject *self) {
+	int rev;
+	if (self->ntrev > 0) {
+		for (rev = self->ntrev - 1; rev >= 0; rev--) {
+			const char *n = index_node_existing(self, rev);
+			if (n == NULL)
+				return -1;
+			if (nt_insert(self, n, rev) == -1)
+				return -1;
+		}
+		self->ntrev = -1;
+	}
+	return 0;
+}
+
 static int nt_partialmatch(indexObject *self, const char *node,
                            Py_ssize_t nodelen)
 {
-	int rev;
+	if (nt_init(self) == -1)
+		return -3;
+	if (nt_populate(self) == -1)
+		return -3;
+
+	return nt_find(self, node, nodelen, 1);
+}
+
+/*
+ * Find the length of the shortest unique prefix of node.
+ *
+ * Return values:
+ *
+ *   -3: error (exception set)
+ *   -2: not found (no exception set)
+ * rest: length of shortest prefix
+ */
+static int nt_shortest(indexObject *self, const char *node)
+{
+	int level, off;
 
 	if (nt_init(self) == -1)
 		return -3;
+	if (nt_populate(self) == -1)
+		return -3;
 
-	if (self->ntrev > 0) {
-		/* ensure that the radix tree is fully populated */
-		for (rev = self->ntrev - 1; rev >= 0; rev--) {
-			const char *n = index_node(self, rev);
+	for (level = off = 0; level < 40; level++) {
+		int k, v;
+		nodetree *n = &self->nt[off];
+		k = nt_level(node, level);
+		v = n->children[k];
+		if (v < 0) {
+			const char *n;
+			v = -(v + 1);
+			n = index_node_existing(self, v);
 			if (n == NULL)
+				return -3;
+			if (memcmp(node, n, 20) != 0)
+				/*
+				 * Found a unique prefix, but it wasn't for the
+				 * requested node (i.e the requested node does
+				 * not exist).
+				 */
 				return -2;
-			if (nt_insert(self, n, rev) == -1)
-				return -3;
+			return level + 1;
 		}
-		self->ntrev = rev;
+		if (v == 0)
+			return -2;
+		off = v;
 	}
-
-	return nt_find(self, node, nodelen, 1);
+	/*
+	 * The node was still not unique after 40 hex digits, so this won't
+	 * happen. Also, if we get here, then there's a programming error in
+	 * this file that made us insert a node longer than 40 hex digits.
+	 */
+	PyErr_SetString(PyExc_Exception, "broken node tree");
+	return -3;
 }
 
 static PyObject *index_partialmatch(indexObject *self, PyObject *args)
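
nt_shortest() walks the radix tree one hex nibble at a time and stops at the
first level where the branch holds a single revision, then double-checks that
the stored node really matches the query. The same idea in Python over a toy
dict-based trie (layout illustrative; the C version indexes a flat array of
nodetree structs):

    def shortest(trie, hexnode):
        # trie maps hex digits either to a nested dict (prefix still
        # shared) or to a full 40-digit hex string (a unique leaf)
        node = trie
        for level, c in enumerate(hexnode, 1):
            node = node.get(c)
            if node is None:
                raise LookupError('node not in trie')
            if isinstance(node, str):
                # unique leaf: verify it is the node we asked about,
                # mirroring the memcmp() in nt_shortest()
                if node != hexnode:
                    raise LookupError('node not in trie')
                return level
        raise LookupError('prefix not unique after 40 digits')
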
@@ -1249,7 +1320,7 @@ static PyObject *index_partialmatch(inde
 	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen))
 		return NULL;
 
-	if (nodelen < 4) {
+	if (nodelen < 1) {
 		PyErr_SetString(PyExc_ValueError, "key too short");
 		return NULL;
 	}
@@ -1280,15 +1351,36 @@ static PyObject *index_partialmatch(inde
 		return PyBytes_FromStringAndSize(nullid, 20);
 	}
 
-	fullnode = index_node(self, rev);
+	fullnode = index_node_existing(self, rev);
 	if (fullnode == NULL) {
-		PyErr_Format(PyExc_IndexError,
-			     "could not access rev %d", rev);
 		return NULL;
 	}
 	return PyBytes_FromStringAndSize(fullnode, 20);
 }
 
+static PyObject *index_shortest(indexObject *self, PyObject *args)
+{
+	Py_ssize_t nodelen;
+	PyObject *val;
+	char *node;
+	int length;
+
+	if (!PyArg_ParseTuple(args, "O", &val))
+		return NULL;
+	if (node_check(val, &node, &nodelen) == -1)
+		return NULL;
+
+	self->ntlookups++;
+	length = nt_shortest(self, node);
+	if (length == -3)
+		return NULL;
+	if (length == -2) {
+		raise_revlog_error();
+		return NULL;
+	}
+	return PyInt_FromLong(length);
+}
+
 static PyObject *index_m_get(indexObject *self, PyObject *args)
 {
 	Py_ssize_t nodelen;
@@ -1758,10 +1850,11 @@ static int index_slice_del(indexObject *
 		Py_ssize_t i;
 
 		for (i = start + 1; i < self->length - 1; i++) {
-			const char *node = index_node(self, i);
+			const char *node = index_node_existing(self, i);
+			if (node == NULL)
+				return -1;
 
-			if (node)
-				nt_insert(self, node, -1);
+			nt_insert(self, node, -1);
 		}
 		if (self->added)
 			nt_invalidate_added(self, 0);
@@ -1977,6 +2070,8 @@ static PyMethodDef index_methods[] = {
 	 "insert an index entry"},
 	{"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
	 "match a potentially ambiguous node ID"},
+	{"shortest", (PyCFunction)index_shortest, METH_VARARGS,
+	 "find length of shortest hex nodeid of a binary ID"},
 	{"stats", (PyCFunction)index_stats, METH_NOARGS,
 	 "stats for the index"},
 	{NULL} /* Sentinel */
diff --git a/mercurial/changegroup.py b/mercurial/changegroup.py
--- a/mercurial/changegroup.py
+++ b/mercurial/changegroup.py
@@ -9,7 +9,6 @@ from __future__ import absolute_import
 
 import os
 import struct
-import tempfile
 import weakref
 
 from .i18n import _
@@ -80,7 +79,7 @@ def writechunks(ui, chunks, filename, vf
         # small (4k is common on Linux).
         fh = open(filename, "wb", 131072)
     else:
-        fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
+        fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
         fh = os.fdopen(fd, r"wb")
     cleanup = filename
     for c in chunks:
@@ -238,18 +237,16 @@ class cg1unpacker(object):
             pos = next
         yield closechunk()
 
-    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
-        # We know that we'll never have more manifests than we had
-        # changesets.
-        self.callback = prog(_('manifests'), numchanges)
+    def _unpackmanifests(self, repo, revmap, trp, prog):
+        self.callback = prog.increment
         # no need to check for empty manifest group here:
         # if the result of the merge of 1 and 2 is the same in 3 and 4,
         # no new manifest will be created and the manifest group will
         # be empty during the pull
         self.manifestheader()
         deltas = self.deltaiter()
-        repo.manifestlog._revlog.addgroup(deltas, revmap, trp)
-        repo.ui.progress(_('manifests'), None)
+        repo.manifestlog.addgroup(deltas, revmap, trp)
+        prog.complete()
         self.callback = None
 
     def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
@@ -294,16 +291,9 @@ class cg1unpacker(object):
             # pull off the changeset group
             repo.ui.status(_("adding changesets\n"))
             clstart = len(cl)
-            class prog(object):
-                def __init__(self, step, total):
-                    self._step = step
-                    self._total = total
-                    self._count = 1
-                def __call__(self):
-                    repo.ui.progress(self._step, self._count, unit=_('chunks'),
-                                     total=self._total)
-                    self._count += 1
-            self.callback = prog(_('changesets'), expectedtotal)
+            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
+                                            total=expectedtotal)
+            self.callback = progress.increment
 
             efiles = set()
             def onchangelog(cl, node):
@@ -319,12 +309,16 @@ class cg1unpacker(object):
                         config='warn-empty-changegroup')
             clend = len(cl)
             changesets = clend - clstart
-            repo.ui.progress(_('changesets'), None)
+            progress.complete()
             self.callback = None
 
             # pull off the manifest group
             repo.ui.status(_("adding manifests\n"))
-            self._unpackmanifests(repo, revmap, trp, prog, changesets)
+            # We know that we'll never have more manifests than we had
+            # changesets.
+            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
+                                            total=changesets)
+            self._unpackmanifests(repo, revmap, trp, progress)
 
             needfiles = {}
             if repo.ui.configbool('server', 'validate'):
@@ -476,9 +470,8 @@ class cg3unpacker(cg2unpacker):
             node, p1, p2, deltabase, cs, flags = headertuple
             return node, p1, p2, deltabase, cs, flags
 
-    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
-        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
-                                                  numchanges)
+    def _unpackmanifests(self, repo, revmap, trp, prog):
+        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
         for chunkdata in iter(self.filelogheader, {}):
             # If we get here, there are directory manifests in the changegroup
             d = chunkdata["filename"]
@@ -523,7 +516,6 @@ class cg1packer(object):
             reorder = stringutil.parsebool(reorder)
         self._repo = repo
         self._reorder = reorder
-        self._progress = repo.ui.progress
         if self._repo.ui.verbose and not self._repo.ui.debugflag:
             self._verbosenote = self._repo.ui.note
         else:
@@ -572,18 +564,20 @@ class cg1packer(object):
                 revs.insert(0, p)
 
         # build deltas
-        total = len(revs) - 1
-        msgbundling = _('bundling')
+        progress = None
+        if units is not None:
+            progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
+                                                  total=(len(revs) - 1))
         for r in xrange(len(revs) - 1):
-            if units is not None:
-                self._progress(msgbundling, r + 1, unit=units, total=total)
+            if progress:
+                progress.update(r + 1)
             prev, curr = revs[r], revs[r + 1]
             linknode = lookup(revlog.node(curr))
             for c in self.revchunk(revlog, curr, prev, linknode):
                 yield c
 
-        if units is not None:
-            self._progress(msgbundling, None)
+        if progress:
+            progress.complete()
         yield self.close()
 
     # filter any nodes that claim to be part of the known set
@@ -749,12 +743,8 @@ class cg1packer(object):
     # The 'source' parameter is useful for extensions
     def generatefiles(self, changedfiles, linknodes, commonrevs, source):
         repo = self._repo
-        progress = self._progress
-        msgbundling = _('bundling')
-
-        total = len(changedfiles)
-        # for progress output
-        msgfiles = _('files')
+        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
+                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
             filerevlog = repo.file(fname)
             if not filerevlog:
@@ -769,8 +759,7 @@ class cg1packer(object):
 
             filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
             if filenodes:
-                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
-                         total=total)
+                progress.update(i + 1, item=fname)
                 h = self.fileheader(fname)
                 size = len(h)
                 yield h
@@ -778,7 +767,7 @@ class cg1packer(object):
                     size += len(chunk)
                     yield chunk
                 self._verbosenote(_('%8.i  %s\n') % (size, fname))
-        progress(msgbundling, None)
+        progress.complete()
 
     def deltaparent(self, revlog, rev, p1, p2, prev):
         if not revlog.candelta(prev, rev):
@@ -982,12 +971,13 @@ def makestream(repo, outgoing, version,
 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
     revisions = 0
     files = 0
+    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
+                                    total=expectedfiles)
     for chunkdata in iter(source.filelogheader, {}):
         files += 1
         f = chunkdata["filename"]
         repo.ui.debug("adding %s revisions\n" % f)
-        repo.ui.progress(_('files'), files, unit=_('files'),
-                         total=expectedfiles)
+        progress.increment()
         fl = repo.file(f)
         o = len(fl)
         try:
@@ -1008,7 +998,7 @@ def _addchangegroupfiles(repo, source, r
                 _("received spurious file revlog entry"))
         if not needs:
             del needfiles[f]
-    repo.ui.progress(_('files'), None)
+    progress.complete()
 
     for f, needs in needfiles.iteritems():
         fl = repo.file(f)
diff --git a/mercurial/cmdutil.py b/mercurial/cmdutil.py
--- a/mercurial/cmdutil.py
+++ b/mercurial/cmdutil.py
@@ -10,7 +10,6 @@ from __future__ import absolute_import
 import errno
 import os
 import re
-import tempfile
 
 from .i18n import _
 from .node import (
@@ -36,8 +35,8 @@ from . import (
     obsolete,
     patch,
     pathutil,
+    phases,
     pycompat,
-    registrar,
     revlog,
     rewriteutil,
     scmutil,
@@ -203,17 +202,21 @@ def setupwrapcolorwrite(ui):
     return oldwrite
 
 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
-    if usecurses:
-        if testfile:
-            recordfn = crecordmod.testdecorator(testfile,
-                                                crecordmod.testchunkselector)
-        else:
-            recordfn = crecordmod.chunkselector
-
-        return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
-
-    else:
-        return patch.filterpatch(ui, originalhunks, operation)
+    try:
+        if usecurses:
+            if testfile:
+                recordfn = crecordmod.testdecorator(
+                    testfile, crecordmod.testchunkselector)
+            else:
+                recordfn = crecordmod.chunkselector
+
+            return crecordmod.filterpatch(ui, originalhunks, recordfn,
+                                          operation)
+    except crecordmod.fallbackerror as e:
+        ui.warn('%s\n' % e.message)
+        ui.warn(_('falling back to text mode\n'))
+
+    return patch.filterpatch(ui, originalhunks, operation)
 
 def recordfilter(ui, originalhunks, operation=None):
     """ Prompts the user to filter the originalhunks and return a list of
@@ -331,7 +334,7 @@ def dorecord(ui, repo, commitfunc, cmdsu
                 try:
                     # backup continues
                     for f in tobackup:
-                        fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
+                        fd, tmpname = pycompat.mkstemp(prefix=f.replace('/', '_') + '.',
                                                        dir=backupdir)
                         os.close(fd)
                         ui.debug('backup %r as %r\n' % (f, tmpname))
@@ -419,7 +422,7 @@ class dirnode(object):
     Represent a directory in user working copy with information required for
     the purpose of tersing its status.
 
-    path is the path to the directory
+    path is the path to the directory, without a trailing '/'
 
     statuses is a set of statuses of all files in this directory (this includes
     all the files in all the subdirectories too)
@@ -456,7 +459,7 @@ class dirnode(object):
 
             # does the dirnode object for subdir exists
             if subdir not in self.subdirs:
-                subdirpath = os.path.join(self.path, subdir)
+                subdirpath = pathutil.join(self.path, subdir)
                 self.subdirs[subdir] = dirnode(subdirpath)
 
             # try adding the file in subdir
@@ -471,7 +474,7 @@ class dirnode(object):
     def iterfilepaths(self):
         """Yield (status, path) for files directly under this directory."""
         for f, st in self.files:
-            yield st, os.path.join(self.path, f)
+            yield st, pathutil.join(self.path, f)
 
     def tersewalk(self, terseargs):
         """
@@ -485,7 +488,7 @@ class dirnode(object):
 
         1) All the files in the directory (including all the files in its
         subdirectories) share the same status and the user has asked us to terse
-        that status. -> yield (status, dirpath)
+        that status. -> yield (status, dirpath). dirpath will end in '/'.
 
         2) Otherwise, we do following:
 
@@ -502,7 +505,7 @@ class dirnode(object):
             # Making sure we terse only when the status abbreviation is
             # passed as terse argument
             if onlyst in terseargs:
-                yield onlyst, self.path + pycompat.ossep
+                yield onlyst, self.path + '/'
                 return
 
         # add the files to status list
@@ -591,8 +594,8 @@ To mark files as resolved:  hg resolve -
     return _commentlines(msg)
 
 def _helpmessage(continuecmd, abortcmd):
-    msg = _('To continue: %s\n'
-            'To abort: %s') % (continuecmd, abortcmd)
+    msg = _('To continue:    %s\n'
+            'To abort:       %s') % (continuecmd, abortcmd)
     return _commentlines(msg)
 
 def _rebasemsg():
@@ -606,7 +609,7 @@ def _unshelvemsg():
 
 def _updatecleanmsg(dest=None):
     warning = _('warning: this will discard uncommitted changes')
-    return 'hg update --clean %s (%s)' % (dest or '.', warning)
+    return 'hg update --clean %s    (%s)' % (dest or '.', warning)
 
 def _graftmsg():
     # tweakdefaults requires `update` to have a rev hence the `.`
@@ -633,7 +636,7 @@ STATES = (
     ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
     ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
    ('graft', fileexistspredicate('graftstate'), _graftmsg),
-    ('unshelve', fileexistspredicate('unshelverebasestate'), _unshelvemsg),
+    ('unshelve', fileexistspredicate('shelvedstate'), _unshelvemsg),
     ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
     # The merge state is part of a list that will be iterated over.
     # They need to be last because some of the other unfinished states may also
@@ -787,16 +790,12 @@ def changebranch(ui, repo, revs, label):
                                      extra=extra,
                                      branch=label)
 
-            commitphase = ctx.phase()
-            overrides = {('phases', 'new-commit'): commitphase}
-            with repo.ui.configoverride(overrides, 'branch-change'):
-                newnode = repo.commitctx(mc)
-
+            newnode = repo.commitctx(mc)
             replacements[ctx.node()] = (newnode,)
             ui.debug('new node id is %s\n' % hex(newnode))
 
         # create obsmarkers and move bookmarks
-        scmutil.cleanupnodes(repo, replacements, 'branch-change')
+        scmutil.cleanupnodes(repo, replacements, 'branch-change', fixphase=True)
 
         # move the working copy too
         wctx = repo[None]
@@ -1248,7 +1247,8 @@ def copy(ui, repo, pats, opts, rename=Fa
                              dryrun=dryrun, cwd=cwd)
         if rename and not dryrun:
             if not after and srcexists and not samefile:
-                repo.wvfs.unlinkpath(abssrc)
+                rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
+                repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
             wctx.forget([abssrc])
 
     # pat: ossep
@@ -1685,7 +1685,7 @@ def showmarker(fm, marker, index=None):
         fm.write('date', '(%s) ', fm.formatdate(marker.date()))
     meta = marker.metadata().copy()
     meta.pop('date', None)
-    smeta = util.rapply(pycompat.maybebytestr, meta)
+    smeta = pycompat.rapply(pycompat.maybebytestr, meta)
     fm.write('metadata', '{%s}', fm.formatdict(smeta, fmt='%r: %r', sep=', '))
     fm.plain('\n')
 
@@ -1884,10 +1884,14 @@ def walkchangerevs(repo, match, opts, pr
     yielding each context, the iterator will first call the prepare
     function on each context in the window in forward order.'''
 
+    allfiles = opts.get('all_files')
     follow = opts.get('follow') or opts.get('follow_first')
     revs = _walkrevs(repo, opts)
     if not revs:
         return []
+    if allfiles and len(revs) > 1:
+        raise error.Abort(_("multiple revisions not supported with "
+                            "--all-files"))
     wanted = set()
     slowpath = match.anypats() or (not match.always() and opts.get('removed'))
     fncache = {}
@@ -1993,7 +1997,11 @@ def walkchangerevs(repo, match, opts, pr
                 ctx = change(rev)
                 if not fns:
                     def fns_generator():
-                        for f in ctx.files():
+                        if allfiles:
+                            fiter = iter(ctx)
+                        else:
+                            fiter = ctx.files()
+                        for f in fiter:
                             if match(f):
                                 yield f
                     fns = fns_generator()
@@ -2137,15 +2145,13 @@ def forget(ui, repo, match, prefix, expl
     return bad, forgot
 
 def files(ui, ctx, m, fm, fmt, subrepos):
-    rev = ctx.rev()
     ret = 1
-    ds = ctx.repo().dirstate
-
+
+    needsfctx = ui.verbose or {'size', 'flags'} & fm.datahint()
     for f in ctx.matches(m):
-        if rev is None and ds[f] == 'r':
-            continue
         fm.startitem()
-        if ui.verbose:
+        fm.context(ctx=ctx)
+        if needsfctx:
             fc = ctx[f]
             fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
         fm.data(abspath=f)
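
files() above now keys the expensive filectx lookup off fm.datahint(), the set
of field names the active template actually references, and hands the ctx to
the formatter via fm.context(). A sketch of the pattern, assuming a formatter
obtained from ui.formatter() (function name illustrative):

    def showfiles(ui, ctx, paths, fm):
        needsfctx = ui.verbose or {'size', 'flags'} & fm.datahint()
        for f in paths:
            fm.startitem()
            fm.context(ctx=ctx)   # lets template keywords like {rev} resolve
            if needsfctx:         # only stat the file when the output needs it
                fc = ctx[f]
                fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
            fm.write('path', '%s\n', f)
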
@@ -2181,13 +2187,12 @@ def remove(ui, repo, m, prefix, after, f
     warn = False
 
     subs = sorted(wctx.substate)
-    total = len(subs)
-    count = 0
+    progress = ui.makeprogress(_('searching'), total=len(subs),
+                               unit=_('subrepos'))
     for subpath in subs:
-        count += 1
         submatch = matchmod.subdirmatcher(subpath, m)
         if subrepos or m.exact(subpath) or any(submatch.files()):
-            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
+            progress.increment()
             sub = wctx.sub(subpath)
             try:
                 if sub.removefiles(submatch, prefix, after, force, subrepos,
@@ -2196,13 +2201,13 @@ def remove(ui, repo, m, prefix, after, f
             except error.LookupError:
                 warnings.append(_("skipping missing subrepository: %s\n")
                                % join(subpath))
-    ui.progress(_('searching'), None)
+    progress.complete()
 
     # warn about failure to delete explicit files/dirs
     deleteddirs = util.dirs(deleted)
     files = m.files()
-    total = len(files)
-    count = 0
+    progress = ui.makeprogress(_('deleting'), total=len(files),
+                               unit=_('files'))
     for f in files:
         def insubrepo():
             for subpath in wctx.substate:
@@ -2210,8 +2215,7 @@ def remove(ui, repo, m, prefix, after, f
                     return True
             return False
 
-        count += 1
-        ui.progress(_('deleting'), count, total=total, unit=_('files'))
+        progress.increment()
         isdir = f in deleteddirs or wctx.hasdir(f)
         if (f in repo.dirstate or isdir or f == '.'
             or insubrepo() or f in subs):
@@ -2226,50 +2230,47 @@ def remove(ui, repo, m, prefix, after, f
                         % m.rel(f))
             # missing files will generate a warning elsewhere
             ret = 1
-    ui.progress(_('deleting'), None)
+    progress.complete()
 
     if force:
         list = modified + deleted + clean + added
     elif after:
         list = deleted
         remaining = modified + added + clean
-        total = len(remaining)
-        count = 0
+        progress = ui.makeprogress(_('skipping'), total=len(remaining),
+                                   unit=_('files'))
        for f in remaining:
-            count += 1
-            ui.progress(_('skipping'), count, total=total, unit=_('files'))
+            progress.increment()
             if ui.verbose or (f in files):
                 warnings.append(_('not removing %s: file still exists\n')
                                 % m.rel(f))
                 ret = 1
-        ui.progress(_('skipping'), None)
+        progress.complete()
     else:
         list = deleted + clean
-        total = len(modified) + len(added)
-        count = 0
+        progress = ui.makeprogress(_('skipping'),
+                                   total=(len(modified) + len(added)),
+                                   unit=_('files'))
        for f in modified:
-            count += 1
-            ui.progress(_('skipping'), count, total=total, unit=_('files'))
+            progress.increment()
             warnings.append(_('not removing %s: file is modified (use -f'
                               ' to force removal)\n') % m.rel(f))
             ret = 1
         for f in added:
-            count += 1
-            ui.progress(_('skipping'), count, total=total, unit=_('files'))
+            progress.increment()
             warnings.append(_("not removing %s: file has been marked for add"
                               " (use 'hg forget' to undo add)\n") % m.rel(f))
             ret = 1
-        ui.progress(_('skipping'), None)
+        progress.complete()
 
     list = sorted(list)
-    total = len(list)
-    count = 0
+    progress = ui.makeprogress(_('deleting'), total=len(list),
+                               unit=_('files'))
    for f in list:
-        count += 1
         if ui.verbose or not m.exact(f):
-            ui.progress(_('deleting'), count, total=total, unit=_('files'))
+            progress.increment()
             ui.status(_('removing %s\n') % m.rel(f))
-    ui.progress(_('deleting'), None)
+    progress.complete()
 
     if not dryrun:
         with repo.wlock():
@@ -2277,7 +2278,9 @@ def remove(ui, repo, m, prefix, after, f
             for f in list:
                 if f in added:
                     continue # we never unlink added files on remove
-                repo.wvfs.unlinkpath(f, ignoremissing=True)
+                rmdir = repo.ui.configbool('experimental',
+                                           'removeemptydirs')
+                repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
         repo[None].forget(list)
 
     if warn:
@@ -2295,6 +2298,7 @@ def _updatecatformatter(fm, ctx, matcher
     if decode:
         data = ctx.repo().wwritedata(path, data)
     fm.startitem()
+    fm.context(ctx=ctx)
     fm.write('data', '%s', data)
     fm.data(abspath=path, path=matcher.rel(path))
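
The new rmdir arguments above consult an experimental option before pruning
now-empty parent directories of removed files. Assuming the 4.7-era knob shown
in these hunks, it can be switched off in hgrc:

    [experimental]
    removeemptydirs = false
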
@@ -2541,21 +2545,19 @@ def amend(ui, repo, old, extra, pats, op
         # This not what we expect from amend.
         return old.node()
 
+    commitphase = None
     if opts.get('secret'):
-        commitphase = 'secret'
-    else:
-        commitphase = old.phase()
-    overrides = {('phases', 'new-commit'): commitphase}
-    with ui.configoverride(overrides, 'amend'):
-        newid = repo.commitctx(new)
+        commitphase = phases.secret
+    newid = repo.commitctx(new)
 
     # Reroute the working copy parent to the new changeset
     repo.setparents(newid, nullid)
     mapping = {old.node(): (newid,)}
     obsmetadata = None
     if opts.get('note'):
-        obsmetadata = {'note': opts['note']}
-    scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata)
+        obsmetadata = {'note': encoding.fromlocal(opts['note'])}
+    scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
+                         fixphase=True, targetphase=commitphase)
 
     # Fixing the dirstate because localrepo.commitctx does not update
     # it. This is rather convenient because we did not need to update
@@ -3002,12 +3004,6 @@ def revert(ui, repo, ctx, parents, *pats
 
             if not opts.get('dry_run'):
                 needdata = ('revert', 'add', 'undelete')
-                if _revertprefetch is not _revertprefetchstub:
-                    ui.deprecwarn("'cmdutil._revertprefetch' is deprecated, "
-                                  "add a callback to 'scmutil.fileprefetchhooks'",
-                                  '4.6', stacklevel=1)
-                    _revertprefetch(repo, ctx,
-                                    *[actions[name][0] for name in needdata])
                 oplist = [actions[name][0] for name in needdata]
                 prefetch = scmutil.prefetchfiles
                 matchfiles = scmutil.matchfiles
@@ -3026,12 +3022,6 @@ def revert(ui, repo, ctx, parents, *pats
                 raise error.Abort("subrepository '%s' does not exist in %s!"
                                   % (sub, short(ctx.node())))
 
-def _revertprefetchstub(repo, ctx, *files):
-    """Stub method for detecting extension wrapping of _revertprefetch(), to
-    issue a deprecation warning."""
-
-_revertprefetch = _revertprefetchstub
-
 def _performrevert(repo, parents, ctx, actions, interactive=False,
                    tobackup=None):
     """function that actually perform all the actions computed for revert
@@ -3051,7 +3041,8 @@ def _performrevert(repo, parents, ctx, a
 
     def doremove(f):
         try:
-            repo.wvfs.unlinkpath(f)
+            rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
+            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
             pass
         repo.dirstate.remove(f)
@@ -3168,12 +3159,6 @@ def _performrevert(repo, parents, ctx, a
         if f in copied:
             repo.dirstate.copy(copied[f], f)
 
-class command(registrar.command):
-    """deprecated: used registrar.command instead"""
-    def _doregister(self, func, name, *args, **kwargs):
-        func._deprecatedregistrar = True  # flag for deprecwarn in extensions.py
-        return super(command, self)._doregister(func, name, *args, **kwargs)
-
 # a list of (ui, repo, otherpeer, opts, missing) functions called by
 # commands.outgoing.  "missing" is "missing" of the result of
 # "findcommonoutgoing()"
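
With fixphase=True (plus an optional targetphase), scmutil.cleanupnodes()
carries the replaced changeset's phase over to its successor itself, which is
why the per-caller ('phases', 'new-commit') overrides could be deleted in
amend, uncommit, split and branch-change. A sketch of a typical caller,
assuming old is the context being rewritten and newid its replacement node
(function name illustrative):

    from mercurial import scmutil

    def finishrewrite(repo, old, newid):
        # map old node -> tuple of successors; fixphase keeps e.g. a secret
        # 'old' secret instead of letting the successor default to the
        # phases.new-commit setting
        mapping = {old.node(): (newid,)}
        scmutil.cleanupnodes(repo, mapping, 'amend', fixphase=True)
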
"missing" is "missing" of the result of # "findcommonoutgoing()" @@ -3198,7 +3183,7 @@ summaryremotehooks = util.hooks() # (state file, clearable, allowcommit, error, hint) unfinishedstates = [ ('graftstate', True, False, _('graft in progress'), - _("use 'hg graft --continue' or 'hg update' to abort")), + _("use 'hg graft --continue' or 'hg graft --stop' to stop")), ('updatestate', True, False, _('last update was interrupted'), _("use 'hg update' to get a consistent checkout")) ] @@ -3285,23 +3270,3 @@ def wrongtooltocontinue(repo, task): if after[1]: hint = after[0] raise error.Abort(_('no %s in progress') % task, hint=hint) - -class changeset_printer(logcmdutil.changesetprinter): - - def __init__(self, ui, *args, **kwargs): - msg = ("'cmdutil.changeset_printer' is deprecated, " - "use 'logcmdutil.logcmdutil'") - ui.deprecwarn(msg, "4.6") - super(changeset_printer, self).__init__(ui, *args, **kwargs) - -def displaygraph(ui, *args, **kwargs): - msg = ("'cmdutil.displaygraph' is deprecated, " - "use 'logcmdutil.displaygraph'") - ui.deprecwarn(msg, "4.6") - return logcmdutil.displaygraph(ui, *args, **kwargs) - -def show_changeset(ui, *args, **kwargs): - msg = ("'cmdutil.show_changeset' is deprecated, " - "use 'logcmdutil.changesetdisplayer'") - ui.deprecwarn(msg, "4.6") - return logcmdutil.changesetdisplayer(ui, *args, **kwargs) diff --git a/mercurial/commands.py b/mercurial/commands.py --- a/mercurial/commands.py +++ b/mercurial/commands.py @@ -40,7 +40,6 @@ from . import ( hbisect, help, hg, - lock as lockmod, logcmdutil, merge as mergemod, obsolete, @@ -50,10 +49,12 @@ from . import ( pycompat, rcutil, registrar, + repair, revsetlang, rewriteutil, scmutil, server, + state as statemod, streamclone, tags as tagsmod, templatekw, @@ -63,12 +64,9 @@ from . import ( ) from .utils import ( dateutil, - procutil, stringutil, ) -release = lockmod.release - table = {} table.update(debugcommandsmod.command._table) @@ -335,13 +333,13 @@ def annotate(ui, repo, *pats, **opts): formatrev = formathex = pycompat.bytestr opmap = [('user', ' ', lambda x: x.fctx.user(), ui.shortuser), - ('number', ' ', lambda x: x.fctx.rev(), formatrev), - ('changeset', ' ', lambda x: hexfn(x.fctx.node()), formathex), + ('rev', ' ', lambda x: x.fctx.rev(), formatrev), + ('node', ' ', lambda x: hexfn(x.fctx.node()), formathex), ('date', ' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)), ('file', ' ', lambda x: x.fctx.path(), pycompat.bytestr), ('line_number', ':', lambda x: x.lineno, pycompat.bytestr), ] - fieldnamemap = {'number': 'rev', 'changeset': 'node'} + opnamemap = {'rev': 'number', 'node': 'changeset'} if (not opts.get('user') and not opts.get('changeset') and not opts.get('date') and not opts.get('file')): @@ -359,11 +357,12 @@ def annotate(ui, repo, *pats, **opts): else: def makefunc(get, fmt): return get - funcmap = [(makefunc(get, fmt), sep) for op, sep, get, fmt in opmap - if opts.get(op)] + datahint = rootfm.datahint() + funcmap = [(makefunc(get, fmt), sep) for fn, sep, get, fmt in opmap + if opts.get(opnamemap.get(fn, fn)) or fn in datahint] funcmap[0] = (funcmap[0][0], '') # no separator in front of first column - fields = ' '.join(fieldnamemap.get(op, op) for op, sep, get, fmt in opmap - if opts.get(op)) + fields = ' '.join(fn for fn, sep, get, fmt in opmap + if opts.get(opnamemap.get(fn, fn)) or fn in datahint) def bad(x, y): raise error.Abort("%s: %s" % (x, y)) @@ -560,13 +559,8 @@ def backout(ui, repo, node=None, rev=Non Returns 0 on success, 1 if nothing to backout or there are unresolved files. 
@@ -560,13 +559,8 @@ def backout(ui, repo, node=None, rev=Non
 
     Returns 0 on success, 1 if nothing to backout or there are unresolved
     files.
     '''
-    wlock = lock = None
-    try:
-        wlock = repo.wlock()
-        lock = repo.lock()
+    with repo.wlock(), repo.lock():
         return _dobackout(ui, repo, node, rev, **opts)
-    finally:
-        release(lock, wlock)
 
 def _dobackout(ui, repo, node=None, rev=None, **opts):
     opts = pycompat.byteskwargs(opts)
@@ -617,21 +611,16 @@ def _dobackout(ui, repo, node=None, rev=
         bheads = repo.branchheads(branch)
         rctx = scmutil.revsingle(repo, hex(parent))
         if not opts.get('merge') and op1 != node:
-            dsguard = dirstateguard.dirstateguard(repo, 'backout')
-            try:
-                ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
-                             'backout')
-                stats = mergemod.update(repo, parent, True, True, node, False)
+            with dirstateguard.dirstateguard(repo, 'backout'):
+                overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
+                with ui.configoverride(overrides, 'backout'):
+                    stats = mergemod.update(repo, parent, True, True, node,
+                                            False)
                 repo.setparents(op1, op2)
-                dsguard.close()
-                hg._showstats(repo, stats)
-                if stats.unresolvedcount:
-                    repo.ui.status(_("use 'hg resolve' to retry unresolved "
-                                     "file merges\n"))
-                    return 1
-            finally:
-                ui.setconfig('ui', 'forcemerge', '', '')
-                lockmod.release(dsguard)
+            hg._showstats(repo, stats)
+            if stats.unresolvedcount:
+                repo.ui.status(_("use 'hg resolve' to retry unresolved "
+                                 "file merges\n"))
+                return 1
         else:
             hg.clean(repo, node, show_stats=False)
             repo.dirstate.setbranch(branch)
@@ -667,12 +656,9 @@ def _dobackout(ui, repo, node=None, rev=
             hg.clean(repo, op1, show_stats=False)
             ui.status(_('merging with changeset %s\n')
                       % nice(repo.changelog.tip()))
-            try:
-                ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
-                             'backout')
+            overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
+            with ui.configoverride(overrides, 'backout'):
                 return hg.merge(repo, hex(repo.changelog.tip()))
-            finally:
-                ui.setconfig('ui', 'forcemerge', '', '')
     return 0
 
 @command('bisect',
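
backout and commit now take both repository locks with one with-statement:
wlock (working copy) is acquired before lock (store), matching Mercurial's
required lock order, and both are released in reverse order however the block
exits. Sketch (function name illustrative):

    def dosomething(repo):
        # replaces the wlock = lock = None / try / finally release() dance
        with repo.wlock(), repo.lock():
            pass  # mutate the working copy and the store here
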
""" opts = pycompat.byteskwargs(opts) + bookmark = opts.get('bookmark') changesets += tuple(opts.get('rev', [])) - if not changesets: - changesets = ['.'] - repo = scmutil.unhidehashlikerevs(repo, changesets, 'nowarn') - revs = scmutil.revrange(repo, changesets) + + if bookmark and changesets: + raise error.Abort(_("-r and -B are mutually exclusive")) + + if bookmark: + if bookmark not in repo._bookmarks: + raise error.Abort(_("bookmark '%s' not found") % bookmark) + + revs = scmutil.bookmarkrevs(repo, bookmark) + else: + if not changesets: + changesets = ['.'] + + repo = scmutil.unhidehashlikerevs(repo, changesets, 'nowarn') + revs = scmutil.revrange(repo, changesets) + if not revs: raise error.Abort(_("export requires at least one changeset")) if len(revs) > 1: @@ -2108,8 +2107,12 @@ def forget(ui, repo, *pats, **opts): 'graft', [('r', 'rev', [], _('revisions to graft'), _('REV')), ('c', 'continue', False, _('resume interrupted graft')), + ('', 'stop', False, _('stop interrupted graft')), + ('', 'abort', False, _('abort interrupted graft')), ('e', 'edit', False, _('invoke editor on commit messages')), ('', 'log', None, _('append graft info to log message')), + ('', 'no-commit', None, + _("don't commit, just apply the changes in working directory")), ('f', 'force', False, _('force graft')), ('D', 'currentdate', False, _('record the current date as commit date')), @@ -2143,10 +2146,7 @@ def graft(ui, repo, *revs, **opts): Once all conflicts are addressed, the graft process can be continued with the -c/--continue option. - .. note:: - - The -c/--continue option does not reapply earlier options, except - for --force. + The -c/--continue option reapplies all the earlier options. .. container:: verbose @@ -2188,6 +2188,10 @@ def _dograft(ui, repo, *revs, **opts): revs = list(revs) revs.extend(opts.get('rev')) + # a dict of data to be stored in state file + statedata = {} + # list of new nodes created by ongoing graft + statedata['newnodes'] = [] if not opts.get('user') and opts.get('currentuser'): opts['user'] = ui.username() @@ -2198,17 +2202,62 @@ def _dograft(ui, repo, *revs, **opts): **pycompat.strkwargs(opts)) cont = False - if opts.get('continue'): + if opts.get('no_commit'): + if opts.get('edit'): + raise error.Abort(_("cannot specify --no-commit and " + "--edit together")) + if opts.get('currentuser'): + raise error.Abort(_("cannot specify --no-commit and " + "--currentuser together")) + if opts.get('currentdate'): + raise error.Abort(_("cannot specify --no-commit and " + "--currentdate together")) + if opts.get('log'): + raise error.Abort(_("cannot specify --no-commit and " + "--log together")) + + graftstate = statemod.cmdstate(repo, 'graftstate') + + if opts.get('stop'): + if opts.get('continue'): + raise error.Abort(_("cannot use '--continue' and " + "'--stop' together")) + if opts.get('abort'): + raise error.Abort(_("cannot use '--abort' and '--stop' together")) + + if any((opts.get('edit'), opts.get('log'), opts.get('user'), + opts.get('date'), opts.get('currentdate'), + opts.get('currentuser'), opts.get('rev'))): + raise error.Abort(_("cannot specify any other flag with '--stop'")) + return _stopgraft(ui, repo, graftstate) + elif opts.get('abort'): + if opts.get('continue'): + raise error.Abort(_("cannot use '--continue' and " + "'--abort' together")) + if any((opts.get('edit'), opts.get('log'), opts.get('user'), + opts.get('date'), opts.get('currentdate'), + opts.get('currentuser'), opts.get('rev'))): + raise error.Abort(_("cannot specify any other flag with '--abort'")) + + 
return _abortgraft(ui, repo, graftstate) + elif opts.get('continue'): cont = True if revs: raise error.Abort(_("can't specify --continue and revisions")) # read in unfinished revisions - try: - nodes = repo.vfs.read('graftstate').splitlines() + if graftstate.exists(): + statedata = _readgraftstate(repo, graftstate) + if statedata.get('date'): + opts['date'] = statedata['date'] + if statedata.get('user'): + opts['user'] = statedata['user'] + if statedata.get('log'): + opts['log'] = True + if statedata.get('no_commit'): + opts['no_commit'] = statedata.get('no_commit') + nodes = statedata['nodes'] revs = [repo[node].rev() for node in nodes] - except IOError as inst: - if inst.errno != errno.ENOENT: - raise + else: cmdutil.wrongtooltocontinue(repo, _('graft')) else: if not revs: @@ -2292,6 +2341,8 @@ def _dograft(ui, repo, *revs, **opts): if not revs: return -1 + if opts.get('no_commit'): + statedata['no_commit'] = True for pos, ctx in enumerate(repo.set("%ld", revs)): desc = '%d:%s "%s"' % (ctx.rev(), ctx, ctx.description().split('\n', 1)[0]) @@ -2312,60 +2363,134 @@ def _dograft(ui, repo, *revs, **opts): user = ctx.user() if opts.get('user'): user = opts['user'] + statedata['user'] = user date = ctx.date() if opts.get('date'): date = opts['date'] + statedata['date'] = date message = ctx.description() if opts.get('log'): message += '\n(grafted from %s)' % ctx.hex() + statedata['log'] = True # we don't merge the first commit when continuing if not cont: # perform the graft merge with p1(rev) as 'ancestor' - try: - # ui.forcemerge is an internal variable, do not document - repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), - 'graft') - stats = mergemod.graft(repo, ctx, ctx.p1(), - ['local', 'graft']) - finally: - repo.ui.setconfig('ui', 'forcemerge', '', 'graft') + overrides = {('ui', 'forcemerge'): opts.get('tool', '')} + with ui.configoverride(overrides, 'graft'): + stats = mergemod.graft(repo, ctx, ctx.p1(), ['local', 'graft']) # report any conflicts if stats.unresolvedcount > 0: # write out state for --continue - nodelines = [repo[rev].hex() + "\n" for rev in revs[pos:]] - repo.vfs.write('graftstate', ''.join(nodelines)) - extra = '' - if opts.get('user'): - extra += ' --user %s' % procutil.shellquote(opts['user']) - if opts.get('date'): - extra += ' --date %s' % procutil.shellquote(opts['date']) - if opts.get('log'): - extra += ' --log' - hint=_("use 'hg resolve' and 'hg graft --continue%s'") % extra + nodes = [repo[rev].hex() for rev in revs[pos:]] + statedata['nodes'] = nodes + stateversion = 1 + graftstate.save(stateversion, statedata) + hint = _("use 'hg resolve' and 'hg graft --continue'") raise error.Abort( _("unresolved conflicts, can't continue"), hint=hint) else: cont = False - # commit - node = repo.commit(text=message, user=user, - date=date, extra=extra, editor=editor) - if node is None: - ui.warn( - _('note: graft of %d:%s created no changes to commit\n') % - (ctx.rev(), ctx)) + # commit if --no-commit is false + if not opts.get('no_commit'): + node = repo.commit(text=message, user=user, date=date, extra=extra, + editor=editor) + if node is None: + ui.warn( + _('note: graft of %d:%s created no changes to commit\n') % + (ctx.rev(), ctx)) + # checking that newnodes exist because old state files won't have it + elif statedata.get('newnodes') is not None: + statedata['newnodes'].append(node) # remove state when we complete successfully if not opts.get('dry_run'): - repo.vfs.unlinkpath('graftstate', ignoremissing=True) - + graftstate.delete() + + return 0 + +def 
_abortgraft(ui, repo, graftstate):
+    """abort the interrupted graft and roll back to the state before the
+    interrupted graft"""
+    if not graftstate.exists():
+        raise error.Abort(_("no interrupted graft to abort"))
+    statedata = _readgraftstate(repo, graftstate)
+    newnodes = statedata.get('newnodes')
+    if newnodes is None:
+        # an old graft state which does not have all the data required to
+        # abort the graft
+        raise error.Abort(_("cannot abort using an old graftstate"))
+
+    # changeset from which graft operation was started
+    startctx = None
+    if len(newnodes) > 0:
+        startctx = repo[newnodes[0]].p1()
+    else:
+        startctx = repo['.']
+    # whether to strip or not
+    cleanup = False
+    if newnodes:
+        newnodes = [repo[r].rev() for r in newnodes]
+        cleanup = True
+        # checking that none of the newnodes turned public or is public
+        immutable = [c for c in newnodes if not repo[c].mutable()]
+        if immutable:
+            repo.ui.warn(_("cannot clean up public changesets %s\n")
+                         % ', '.join(bytes(repo[r]) for r in immutable),
+                         hint=_("see 'hg help phases' for details"))
+            cleanup = False
+
+        # checking that no new nodes are created on top of grafted revs
+        desc = set(repo.changelog.descendants(newnodes))
+        if desc - set(newnodes):
+            repo.ui.warn(_("new changesets detected on destination "
+                           "branch, can't strip\n"))
+            cleanup = False
+
+        if cleanup:
+            with repo.wlock(), repo.lock():
+                hg.updaterepo(repo, startctx.node(), overwrite=True)
+                # stripping the new nodes created
+                strippoints = [c.node() for c in repo.set("roots(%ld)",
+                                                          newnodes)]
+                repair.strip(repo.ui, repo, strippoints, backup=False)
+
+    if not cleanup:
+        # we don't update to the startnode if we can't strip
+        startctx = repo['.']
+        hg.updaterepo(repo, startctx.node(), overwrite=True)
+
+    ui.status(_("graft aborted\n"))
+    ui.status(_("working directory is now at %s\n") % startctx.hex()[:12])
+    graftstate.delete()
+    return 0
+
+def _readgraftstate(repo, graftstate):
+    """read the graft state file and return a dict of the data stored in it"""
+    try:
+        return graftstate.read()
+    except error.CorruptedState:
+        nodes = repo.vfs.read('graftstate').splitlines()
+        return {'nodes': nodes}
+
+def _stopgraft(ui, repo, graftstate):
+    """stop the interrupted graft"""
+    if not graftstate.exists():
+        raise error.Abort(_("no interrupted graft found"))
+    pctx = repo['.']
+    hg.updaterepo(repo, pctx.node(), overwrite=True)
+    graftstate.delete()
+    ui.status(_("stopped the interrupted graft\n"))
+    ui.status(_("working directory is now at %s\n") % pctx.hex()[:12])
     return 0

 @command('grep',
     [('0', 'print0', None, _('end fields with NUL')),
-    ('', 'all', None, _('print all revisions that match')),
+    ('', 'all', None, _('print all revisions that match (DEPRECATED)')),
+    ('', 'diff', None, _('print all revisions when the term was introduced '
+                         'or removed')),
     ('a', 'text', None, _('treat all files as text')),
     ('f', 'follow', None,
      _('follow changeset history,'
@@ -2376,6 +2501,8 @@ def _dograft(ui, repo, *revs, **opts):
     ('n', 'line-number', None, _('print matching line numbers')),
     ('r', 'rev', [], _('only search files changed within revision range'),
      _('REV')),
+    ('', 'all-files', None,
+     _('include all files in the changeset while grepping (EXPERIMENTAL)')),
     ('u', 'user', None, _('list the author (long with -v)')),
     ('d', 'date', None, _('list the date (short with -q)')),
     ] + formatteropts + walkopts,
@@ -2392,7 +2519,7 @@ def grep(ui, repo, pattern, *pats, **opt
     file in which it finds a match.
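The core of _abortgraft above is the decision whether the nodes created by the interrupted graft may be stripped. The same checks, distilled into a pure function over hypothetical inputs (not the real Mercurial API):

def can_strip(newnodes, publicnodes, descendants):
    """newnodes: nodes created by the graft; publicnodes: all immutable
    nodes; descendants: every node descending from newnodes."""
    if not newnodes:
        return False                      # nothing was committed yet
    if any(n in publicnodes for n in newnodes):
        return False                      # public history is immutable
    if set(descendants) - set(newnodes):
        return False                      # someone built on the graft
    return True

assert can_strip(['n1'], set(), ['n1'])
assert not can_strip(['n1'], {'n1'}, ['n1'])       # turned public
assert not can_strip(['n1'], set(), ['n1', 'n2'])  # new work on top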
To get it to print every revision that contains a change in match status ("-" for a match that becomes a non-match, or "+" for a non-match that becomes a match), use the - --all flag. + --diff flag. PATTERN can be any Python (roughly Perl-compatible) regular expression. @@ -2404,6 +2531,17 @@ def grep(ui, repo, pattern, *pats, **opt Returns 0 if a match is found, 1 otherwise. """ opts = pycompat.byteskwargs(opts) + diff = opts.get('all') or opts.get('diff') + if diff and opts.get('all_files'): + raise error.Abort(_('--diff and --all-files are mutually exclusive')) + # TODO: remove "not opts.get('rev')" if --all-files -rMULTIREV gets working + if opts.get('all_files') is None and not opts.get('rev') and not diff: + # experimental config: commands.grep.all-files + opts['all_files'] = ui.configbool('commands', 'grep.all-files') + plaingrep = opts.get('all_files') and not opts.get('rev') + if plaingrep: + opts['rev'] = ['wdir()'] + reflags = re.M if opts.get('ignore_case'): reflags |= re.I @@ -2481,7 +2619,7 @@ def grep(ui, repo, pattern, *pats, **opt yield ('+', b[i]) def display(fm, fn, ctx, pstates, states): - rev = ctx.rev() + rev = scmutil.intrev(ctx) if fm.isplain(): formatuser = ui.shortuser else: @@ -2494,22 +2632,27 @@ def grep(ui, repo, pattern, *pats, **opt @util.cachefunc def binary(): flog = getfile(fn) - return stringutil.binary(flog.read(ctx.filenode(fn))) + try: + return stringutil.binary(flog.read(ctx.filenode(fn))) + except error.WdirUnsupported: + return ctx[fn].isbinary() fieldnamemap = {'filename': 'file', 'linenumber': 'line_number'} - if opts.get('all'): + if diff: iter = difflinestates(pstates, states) else: iter = [('', l) for l in states] for change, l in iter: fm.startitem() - fm.data(node=fm.hexfunc(ctx.node())) + fm.context(ctx=ctx) + fm.data(node=fm.hexfunc(scmutil.binnode(ctx))) + cols = [ ('filename', fn, True), - ('rev', rev, True), + ('rev', rev, not plaingrep), ('linenumber', l.linenum, opts.get('line_number')), ] - if opts.get('all'): + if diff: cols.append(('change', change, True)) cols.extend([ ('user', formatuser(ctx.user()), opts.get('user')), @@ -2569,8 +2712,10 @@ def grep(ui, repo, pattern, *pats, **opt fnode = ctx.filenode(fn) except error.LookupError: continue - - copied = flog.renamed(fnode) + try: + copied = flog.renamed(fnode) + except error.WdirUnsupported: + copied = ctx[fn].renamed() copy = follow and copied and copied[0] if copy: copies.setdefault(rev, {})[fn] = copy @@ -2581,7 +2726,11 @@ def grep(ui, repo, pattern, *pats, **opt files.append(fn) if fn not in matches[rev]: - grepbody(fn, rev, flog.read(fnode)) + try: + content = flog.read(fnode) + except error.WdirUnsupported: + content = ctx[fn].data() + grepbody(fn, rev, content) pfn = copy or fn if pfn not in matches[parent]: @@ -2607,7 +2756,7 @@ def grep(ui, repo, pattern, *pats, **opt if pstates or states: r = display(fm, fn, ctx, pstates, states) found = found or r - if r and not opts.get('all'): + if r and not diff: skip[fn] = True if copy: skip[copy] = True @@ -3071,69 +3220,62 @@ def import_(ui, repo, patch1=None, *patc raise error.Abort(_('cannot use --exact with --prefix')) base = opts["base"] - wlock = dsguard = lock = tr = None msgs = [] ret = 0 - - try: - wlock = repo.wlock() - + with repo.wlock(): if update: cmdutil.checkunfinished(repo) if (exact or not opts.get('force')): cmdutil.bailifchanged(repo) if not opts.get('no_commit'): - lock = repo.lock() - tr = repo.transaction('import') + lock = repo.lock + tr = lambda: repo.transaction('import') + dsguard = 
util.nullcontextmanager else: - dsguard = dirstateguard.dirstateguard(repo, 'import') - parents = repo[None].parents() - for patchurl in patches: - if patchurl == '-': - ui.status(_('applying patch from stdin\n')) - patchfile = ui.fin - patchurl = 'stdin' # for error message - else: - patchurl = os.path.join(base, patchurl) - ui.status(_('applying %s\n') % patchurl) - patchfile = hg.openpath(ui, patchurl) - - haspatch = False - for hunk in patch.split(patchfile): - with patch.extract(ui, hunk) as patchdata: - msg, node, rej = cmdutil.tryimportone(ui, repo, patchdata, - parents, opts, - msgs, hg.clean) - if msg: - haspatch = True - ui.note(msg + '\n') - if update or exact: - parents = repo[None].parents() + lock = util.nullcontextmanager + tr = util.nullcontextmanager + dsguard = lambda: dirstateguard.dirstateguard(repo, 'import') + with lock(), tr(), dsguard(): + parents = repo[None].parents() + for patchurl in patches: + if patchurl == '-': + ui.status(_('applying patch from stdin\n')) + patchfile = ui.fin + patchurl = 'stdin' # for error message else: - parents = [repo[node]] - if rej: - ui.write_err(_("patch applied partially\n")) - ui.write_err(_("(fix the .rej files and run " - "`hg commit --amend`)\n")) - ret = 1 - break - - if not haspatch: - raise error.Abort(_('%s: no diffs found') % patchurl) - - if tr: - tr.close() - if msgs: - repo.savecommitmessage('\n* * *\n'.join(msgs)) - if dsguard: - dsguard.close() + patchurl = os.path.join(base, patchurl) + ui.status(_('applying %s\n') % patchurl) + patchfile = hg.openpath(ui, patchurl) + + haspatch = False + for hunk in patch.split(patchfile): + with patch.extract(ui, hunk) as patchdata: + msg, node, rej = cmdutil.tryimportone(ui, repo, + patchdata, + parents, opts, + msgs, hg.clean) + if msg: + haspatch = True + ui.note(msg + '\n') + if update or exact: + parents = repo[None].parents() + else: + parents = [repo[node]] + if rej: + ui.write_err(_("patch applied partially\n")) + ui.write_err(_("(fix the .rej files and run " + "`hg commit --amend`)\n")) + ret = 1 + break + + if not haspatch: + raise error.Abort(_('%s: no diffs found') % patchurl) + + if msgs: + repo.savecommitmessage('\n* * *\n'.join(msgs)) return ret - finally: - if tr: - tr.release() - release(lock, dsguard, wlock) @command('incoming|in', [('f', 'force', None, @@ -3291,7 +3433,13 @@ def locate(ui, repo, *pats, **opts): badfn=lambda x, y: False) ui.pager('locate') - for abs in ctx.matches(m): + if ctx.rev() is None: + # When run on the working copy, "locate" includes removed files, so + # we get the list of files from the dirstate. 
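The import_ rewrite above leans on a small trick: lock, tr, and dsguard are bound to callables, either real context-manager factories or no-ops, so a single `with lock(), tr(), dsguard():` serves both the normal and the --no-commit paths. A plain-Python sketch of the same idea, using contextlib.nullcontext (Python 3.7+) in the role of util.nullcontextmanager and open(os.devnull) as a stand-in for the real guards:

import contextlib
import os

def apply_patches(no_commit):
    if not no_commit:
        lock = lambda: open(os.devnull, 'w')     # stand-in for repo.lock()
        dsguard = contextlib.nullcontext         # no dirstate guard needed
    else:
        lock = contextlib.nullcontext            # no repo lock taken
        dsguard = lambda: open(os.devnull, 'w')  # stand-in for dirstateguard
    with lock(), dsguard():                      # one body for both paths
        return 'applied'

assert apply_patches(True) == apply_patches(False) == 'applied'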
+ filesgen = sorted(repo.dirstate.matches(m)) + else: + filesgen = ctx.matches(m) + for abs in filesgen: if opts.get('fullpath'): ui.write(repo.wjoin(abs), end) else: @@ -3545,6 +3693,7 @@ def manifest(ui, repo, node=None, rev=No ui.pager('manifest') for f in ctx: fm.startitem() + fm.context(ctx=ctx) fl = ctx[f].flags() fm.condwrite(ui.debugflag, 'hash', '%s ', hex(mf[f])) fm.condwrite(ui.verbose, 'mode type', '%s %1s ', mode[fl], char[fl]) @@ -3623,15 +3772,13 @@ def merge(ui, repo, node=None, **opts): displayer.close() return 0 - try: - # ui.forcemerge is an internal variable, do not document - repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'merge') + # ui.forcemerge is an internal variable, do not document + overrides = {('ui', 'forcemerge'): opts.get('tool', '')} + with ui.configoverride(overrides, 'merge'): force = opts.get('force') labels = ['working copy', 'merge rev'] return hg.merge(repo, node, force=force, mergeforce=force, labels=labels, abort=abort) - finally: - ui.setconfig('ui', 'forcemerge', '', 'merge') @command('outgoing|out', [('f', 'force', None, _('run even when the destination is unrelated')), @@ -3679,6 +3826,13 @@ def outgoing(ui, repo, dest=None, **opts Returns 0 if there are outgoing changes, 1 otherwise. """ + # hg._outgoing() needs to re-resolve the path in order to handle #branch + # style URLs, so don't overwrite dest. + path = ui.paths.getpath(dest, default=('default-push', 'default')) + if not path: + raise error.Abort(_('default repository not configured!'), + hint=_("see 'hg help config.paths'")) + opts = pycompat.byteskwargs(opts) if opts.get('graph'): logcmdutil.checkunsupportedgraphflags([], opts) @@ -3696,8 +3850,7 @@ def outgoing(ui, repo, dest=None, **opts return 0 if opts.get('bookmarks'): - dest = ui.expandpath(dest or 'default-push', dest or 'default') - dest, branches = hg.parseurl(dest, opts.get('branch')) + dest = path.pushloc or path.loc other = hg.peer(repo, opts, dest) if 'bookmarks' not in other.listkeys('namespaces'): ui.warn(_("remote doesn't support bookmarks\n")) @@ -3706,7 +3859,7 @@ def outgoing(ui, repo, dest=None, **opts ui.pager('outgoing') return bookmarks.outgoing(ui, repo, other) - repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default') + repo._subtoppath = path.pushloc or path.loc try: return hg.outgoing(ui, repo, dest, opts) finally: @@ -4391,7 +4544,8 @@ def resolve(ui, repo, *pats, **opts): ui.pager('resolve') fm = ui.formatter('resolve', opts) ms = mergemod.mergestate.read(repo) - m = scmutil.match(repo[None], pats, opts) + wctx = repo[None] + m = scmutil.match(wctx, pats, opts) # Labels and keys based on merge state. Unresolved path conflicts show # as 'P'. 
Resolved path conflicts show as 'R', the same as normal @@ -4411,6 +4565,7 @@ def resolve(ui, repo, *pats, **opts): label, key = mergestateinfo[ms[f]] fm.startitem() + fm.context(ctx=wctx) fm.condwrite(not nostatus, 'status', '%s ', key, label=label) fm.write('path', '%s\n', f, label=label) fm.end() @@ -4488,15 +4643,14 @@ def resolve(ui, repo, *pats, **opts): try: # preresolve file - ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), - 'resolve') - complete, r = ms.preresolve(f, wctx) + overrides = {('ui', 'forcemerge'): opts.get('tool', '')} + with ui.configoverride(overrides, 'resolve'): + complete, r = ms.preresolve(f, wctx) if not complete: tocomplete.append(f) elif r: ret = 1 finally: - ui.setconfig('ui', 'forcemerge', '', 'resolve') ms.commit() # replace filemerge's .orig file with our resolve file, but only @@ -4512,13 +4666,12 @@ def resolve(ui, repo, *pats, **opts): for f in tocomplete: try: # resolve file - ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), - 'resolve') - r = ms.resolve(f, wctx) + overrides = {('ui', 'forcemerge'): opts.get('tool', '')} + with ui.configoverride(overrides, 'resolve'): + r = ms.resolve(f, wctx) if r: ret = 1 finally: - ui.setconfig('ui', 'forcemerge', '', 'resolve') ms.commit() # replace filemerge's .orig file with our resolve file @@ -4747,7 +4900,8 @@ def root(ui, repo): ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')), ('', 'style', '', _('template style to use'), _('STYLE')), ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')), - ('', 'certificate', '', _('SSL certificate file'), _('FILE'))] + ('', 'certificate', '', _('SSL certificate file'), _('FILE')), + ('', 'print-url', None, _('start and print only the URL'))] + subrepoopts, _('[OPTION]...'), optionalrepo=True) @@ -4779,6 +4933,8 @@ def serve(ui, repo, **opts): opts = pycompat.byteskwargs(opts) if opts["stdio"] and opts["cmdserver"]: raise error.Abort(_("cannot use --stdio with --cmdserver")) + if opts["print_url"] and ui.verbose: + raise error.Abort(_("cannot use --print-url with --verbose")) if opts["stdio"]: if repo is None: @@ -4790,6 +4946,8 @@ def serve(ui, repo, **opts): service = server.createservice(ui, repo, opts) return server.runservice(opts, initfn=service.init, runfn=service.run) +_NOTTERSE = 'nothing' + @command('^status|st', [('A', 'all', None, _('show status of all files')), ('m', 'modified', None, _('show only modified files')), @@ -4800,7 +4958,7 @@ def serve(ui, repo, **opts): ('u', 'unknown', None, _('show only unknown (not tracked) files')), ('i', 'ignored', None, _('show only ignored files')), ('n', 'no-status', None, _('hide status prefix')), - ('t', 'terse', '', _('show the terse output (EXPERIMENTAL)')), + ('t', 'terse', _NOTTERSE, _('show the terse output (EXPERIMENTAL)')), ('C', 'copies', None, _('show source of copied files')), ('0', 'print0', None, _('end filenames with NUL, for use with xargs')), ('', 'rev', [], _('show difference from revision'), _('REV')), @@ -4898,6 +5056,11 @@ def status(ui, repo, *pats, **opts): revs = opts.get('rev') change = opts.get('change') terse = opts.get('terse') + if terse is _NOTTERSE: + if revs: + terse = '' + else: + terse = ui.config('commands', 'status.terse') if revs and change: msg = _('cannot specify --rev and --change at the same time') @@ -4939,7 +5102,8 @@ def status(ui, repo, *pats, **opts): # we need to compute clean and unknown to terse stat = repo.status(ctx1.node(), ctx2.node(), m, 'ignored' in show or 'i' in terse, - True, True, opts.get('subrepos')) + clean=True, unknown=True, + 
listsubrepos=opts.get('subrepos')) stat = cmdutil.tersedir(stat, terse) else: @@ -4963,6 +5127,7 @@ def status(ui, repo, *pats, **opts): label = 'status.' + state for f in files: fm.startitem() + fm.context(ctx=ctx2) fm.condwrite(showchar, 'status', '%s ', char, label=label) fm.write('path', fmt, repo.pathto(f, cwd), label=label) if f in copy: @@ -5301,10 +5466,7 @@ def tag(ui, repo, name1, *names, **opts) Returns 0 on success. """ opts = pycompat.byteskwargs(opts) - wlock = lock = None - try: - wlock = repo.wlock() - lock = repo.lock() + with repo.wlock(), repo.lock(): rev_ = "." names = [t.strip() for t in (name1,) + names] if len(names) != len(set(names)): @@ -5375,8 +5537,6 @@ def tag(ui, repo, name1, *names, **opts) tagsmod.tag(repo, names, node, message, opts.get('local'), opts.get('user'), date, editor=editor) - finally: - release(lock, wlock) @command('tags', formatteropts, '', intents={INTENT_READONLY}) def tags(ui, repo, **opts): @@ -5392,6 +5552,7 @@ def tags(ui, repo, **opts): opts = pycompat.byteskwargs(opts) ui.pager('tags') fm = ui.formatter('tags', opts) + contexthint = fm.contexthint('tag rev node type') hexfunc = fm.hexfunc tagtype = "" @@ -5404,6 +5565,8 @@ def tags(ui, repo, **opts): tagtype = 'local' fm.startitem() + if 'ctx' in contexthint: + fm.context(ctx=repo[n]) fm.write('tag', '%s', t, label=label) fmt = " " * (30 - encoding.colwidth(t)) + ' %5d:%s' fm.condwrite(not ui.quiet, 'rev node', fmt, @@ -5583,18 +5746,19 @@ def update(ui, repo, node=None, **opts): repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn') ctx = scmutil.revsingle(repo, rev, rev) rev = ctx.rev() - if ctx.hidden(): + hidden = ctx.hidden() + overrides = {('ui', 'forcemerge'): opts.get(r'tool', '')} + with ui.configoverride(overrides, 'update'): + ret = hg.updatetotally(ui, repo, rev, brev, clean=clean, + updatecheck=updatecheck) + if hidden: ctxstr = ctx.hex()[:12] - ui.warn(_("updating to a hidden changeset %s\n") % ctxstr) + ui.warn(_("updated to hidden changeset %s\n") % ctxstr) if ctx.obsolete(): obsfatemsg = obsutil._getfilteredreason(repo, ctxstr, ctx) ui.warn("(%s)\n" % obsfatemsg) - - repo.ui.setconfig('ui', 'forcemerge', opts.get(r'tool'), 'update') - - return hg.updatetotally(ui, repo, rev, brev, clean=clean, - updatecheck=updatecheck) + return ret @command('verify', []) def verify(ui, repo): diff --git a/mercurial/commandserver.py b/mercurial/commandserver.py --- a/mercurial/commandserver.py +++ b/mercurial/commandserver.py @@ -256,7 +256,7 @@ class server(object): self.cout, self.cerr) try: - ret = (dispatch.dispatch(req) or 0) & 255 # might return None + ret = dispatch.dispatch(req) & 255 self.cresult.write(struct.pack('>i', int(ret))) finally: # restore old cwd @@ -494,6 +494,8 @@ class unixforkingservice(object): conn.close() # release handle in parent process else: try: + selector.close() + self._sock.close() self._runworker(conn) conn.close() os._exit(0) diff --git a/mercurial/config.py b/mercurial/config.py --- a/mercurial/config.py +++ b/mercurial/config.py @@ -215,7 +215,7 @@ def parselist(value): parts.append('') if s[offset:offset + 1] == '"' and not parts[-1]: return _parse_quote, parts, offset + 1 - elif s[offset:offset + 1] == '"' and parts[-1][-1] == '\\': + elif s[offset:offset + 1] == '"' and parts[-1][-1:] == '\\': parts[-1] = parts[-1][:-1] + s[offset:offset + 1] return _parse_plain, parts, offset + 1 parts[-1] += s[offset:offset + 1] diff --git a/mercurial/configitems.py b/mercurial/configitems.py --- a/mercurial/configitems.py +++ b/mercurial/configitems.py @@ 
-147,6 +147,9 @@ coreconfigitem('annotate', 'nobinary', coreconfigitem('annotate', 'noprefix', default=False, ) +coreconfigitem('annotate', 'word-diff', + default=False, +) coreconfigitem('auth', 'cookiefile', default=None, ) @@ -184,6 +187,9 @@ coreconfigitem('color', 'mode', coreconfigitem('color', 'pagermode', default=dynamicdefault, ) +coreconfigitem('commands', 'grep.all-files', + default=False, +) coreconfigitem('commands', 'show.aliasprefix', default=list, ) @@ -193,13 +199,14 @@ coreconfigitem('commands', 'status.relat coreconfigitem('commands', 'status.skipstates', default=[], ) +coreconfigitem('commands', 'status.terse', + default='', +) coreconfigitem('commands', 'status.verbose', default=False, ) coreconfigitem('commands', 'update.check', default=None, - # Deprecated, remove after 4.4 release - alias=[('experimental', 'updatecheck')] ) coreconfigitem('commands', 'update.requiredest', default=False, @@ -208,6 +215,9 @@ coreconfigitem('committemplate', '.*', default=None, generic=True, ) +coreconfigitem('convert', 'bzr.saverev', + default=True, +) coreconfigitem('convert', 'cvsps.cache', default=True, ) @@ -362,6 +372,9 @@ coreconfigitem('devel', 'user.obsmarker' coreconfigitem('devel', 'warn-config-unknown', default=None, ) +coreconfigitem('devel', 'debug.extensions', + default=False, +) coreconfigitem('devel', 'debug.peer-request', default=False, ) @@ -395,6 +408,9 @@ coreconfigitem('diff', 'nobinary', coreconfigitem('diff', 'noprefix', default=False, ) +coreconfigitem('diff', 'word-diff', + default=False, +) coreconfigitem('email', 'bcc', default=None, ) @@ -508,9 +524,6 @@ coreconfigitem('experimental', 'evolutio coreconfigitem('experimental', 'evolution.track-operation', default=True, ) -coreconfigitem('experimental', 'worddiff', - default=False, -) coreconfigitem('experimental', 'maxdeltachainspan', default=-1, ) @@ -559,12 +572,18 @@ coreconfigitem('experimental', 'httppost coreconfigitem('experimental', 'mergedriver', default=None, ) +coreconfigitem('experimental', 'nointerrupt', default=False) +coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True) + coreconfigitem('experimental', 'obsmarkers-exchange-debug', default=False, ) coreconfigitem('experimental', 'remotenames', default=False, ) +coreconfigitem('experimental', 'removeemptydirs', + default=True, +) coreconfigitem('experimental', 'revlogv2', default=None, ) @@ -581,10 +600,10 @@ coreconfigitem('experimental', 'sparse-r default=False, ) coreconfigitem('experimental', 'sparse-read.density-threshold', - default=0.25, + default=0.50, ) coreconfigitem('experimental', 'sparse-read.min-gap-size', - default='256K', + default='65K', ) coreconfigitem('experimental', 'treemanifest', default=False, @@ -604,6 +623,9 @@ coreconfigitem('experimental', 'web.api. 
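The web.allow-archive entry further below renames an option while keeping the old allow_archive spelling readable through an alias list. A dict-based sketch of that lookup order (a hypothetical stand-in for the real configitems registry, not its actual code):

REGISTRY = {
    ('web', 'allow-archive'): {'default': [],
                               'alias': [('web', 'allow_archive')]},
}

def config(settings, section, name):
    item = REGISTRY[(section, name)]
    # the canonical name wins; aliases are consulted in order after it
    for key in [(section, name)] + item['alias']:
        if key in settings:
            return settings[key]
    return item['default']

old_style = {('web', 'allow_archive'): ['zip']}
assert config(old_style, 'web', 'allow-archive') == ['zip']
assert config({}, 'web', 'allow-archive') == []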
coreconfigitem('experimental', 'web.api.debugreflect',
     default=False,
 )
+coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
+    default=False,
+)
 coreconfigitem('experimental', 'xdiff',
     default=False,
 )
@@ -615,9 +637,6 @@ coreconfigitem('extdata', '.*',
     default=None,
     generic=True,
 )
-coreconfigitem('format', 'aggressivemergedeltas',
-    default=False,
-)
 coreconfigitem('format', 'chunkcachesize',
     default=None,
 )
@@ -636,6 +655,9 @@ coreconfigitem('format', 'maxchainlen',
 coreconfigitem('format', 'obsstore-version',
     default=None,
 )
+coreconfigitem('format', 'sparse-revlog',
+    default=False,
+)
 coreconfigitem('format', 'usefncache',
     default=True,
 )
@@ -866,6 +888,9 @@ coreconfigitem('profiling', 'sort',
 coreconfigitem('profiling', 'statformat',
     default='hotpath',
 )
+coreconfigitem('profiling', 'time-track',
+    default='cpu',
+)
 coreconfigitem('profiling', 'type',
     default='stat',
 )
@@ -902,6 +927,10 @@ coreconfigitem('progress', 'width',
 coreconfigitem('push', 'pushvars.server',
     default=False,
 )
+coreconfigitem('revlog', 'optimize-delta-parent-choice',
+    default=True,
+    # formerly an experimental option: format.aggressivemergedeltas
+)
 coreconfigitem('server', 'bookmarks-pushkey-compat',
     default=True,
 )
@@ -932,16 +961,16 @@ coreconfigitem('server', 'concurrent-pus
 coreconfigitem('server', 'disablefullbundle',
     default=False,
 )
-coreconfigitem('server', 'streamunbundle',
-    default=False,
+coreconfigitem('server', 'maxhttpheaderlen',
+    default=1024,
 )
 coreconfigitem('server', 'pullbundle',
     default=False,
 )
-coreconfigitem('server', 'maxhttpheaderlen',
-    default=1024,
+coreconfigitem('server', 'preferuncompressed',
+    default=False,
 )
-coreconfigitem('server', 'preferuncompressed',
+coreconfigitem('server', 'streamunbundle',
     default=False,
 )
 coreconfigitem('server', 'uncompressed',
@@ -1065,6 +1094,9 @@ coreconfigitem('ui', 'formatted',
 coreconfigitem('ui', 'graphnodetemplate',
     default=None,
 )
+coreconfigitem('ui', 'history-editing-backup',
+    default=True,
+)
 coreconfigitem('ui', 'interactive',
     default=None,
 )
@@ -1074,6 +1106,9 @@ coreconfigitem('ui', 'interface',
 coreconfigitem('ui', 'interface.chunkselector',
     default=None,
 )
+coreconfigitem('ui', 'large-file-limit',
+    default=10000000,
+)
 coreconfigitem('ui', 'logblockedtimes',
     default=False,
 )
@@ -1225,7 +1260,8 @@ coreconfigitem('web', 'accesslog',
 coreconfigitem('web', 'address',
     default='',
 )
-coreconfigitem('web', 'allow_archive',
+coreconfigitem('web', 'allow-archive',
+    alias=[('web', 'allow_archive')],
     default=list,
 )
 coreconfigitem('web', 'allow_read',
diff --git a/mercurial/context.py b/mercurial/context.py
--- a/mercurial/context.py
+++ b/mercurial/context.py
@@ -10,7 +10,6 @@ from __future__ import absolute_import
 import errno
 import filecmp
 import os
-import re
 import stat
 
 from .i18n import _
@@ -24,7 +23,6 @@ from .node import (
     short,
     wdirfilenodeids,
     wdirid,
-    wdirrev,
 )
 
 from .
import ( dagop, @@ -52,8 +50,6 @@ from .utils import ( propertycache = util.propertycache -nonascii = re.compile(br'[^\x21-\x7f]').search - class basectx(object): """A basectx object represents the common logic for its children: changectx: read-only context that is already present in the repo, @@ -185,8 +181,8 @@ class basectx(object): def mutable(self): return self.phase() > phases.public - def getfileset(self, expr): - return fileset.getfileset(self, expr) + def matchfileset(self, expr, badfn=None): + return fileset.match(self, expr, badfn=badfn) def obsolete(self): """True if the changeset is obsolete""" @@ -298,14 +294,18 @@ class basectx(object): auditor=r.nofsauditor, ctx=self, listsubrepos=listsubrepos, badfn=badfn) - def diff(self, ctx2=None, match=None, **opts): + def diff(self, ctx2=None, match=None, changes=None, opts=None, + losedatafn=None, prefix='', relroot='', copy=None, + hunksfilterfn=None): """Returns a diff generator for the given contexts and matcher""" if ctx2 is None: ctx2 = self.p1() if ctx2 is not None: ctx2 = self._repo[ctx2] - diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts)) - return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts) + return patch.diff(self._repo, ctx2, self, match=match, changes=changes, + opts=opts, losedatafn=losedatafn, prefix=prefix, + relroot=relroot, copy=copy, + hunksfilterfn=hunksfilterfn) def dirs(self): return self._manifest.dirs() @@ -377,31 +377,6 @@ class basectx(object): return r -def changectxdeprecwarn(repo): - # changectx's constructor will soon lose support for these forms of - # changeids: - # * stringinfied ints - # * bookmarks, tags, branches, and other namespace identifiers - # * hex nodeid prefixes - # - # Depending on your use case, replace repo[x] by one of these: - # * If you want to support general revsets, use scmutil.revsingle(x) - # * If you know that "x" is a stringified int, use repo[int(x)] - # * If you know that "x" is a bookmark, use repo._bookmarks.changectx(x) - # * If you know that "x" is a tag, use repo[repo.tags()[x]] - # * If you know that "x" is a branch or in some other namespace, - # use the appropriate mechanism for that namespace - # * If you know that "x" is a hex nodeid prefix, use - # repo[scmutil.resolvehexnodeidprefix(repo, x)] - # * If "x" is a string that can be any of the above, but you don't want - # to allow general revsets (perhaps because "x" may come from a remote - # user and the revset may be too costly), use scmutil.revsymbol(repo, x) - # * If "x" can be a mix of the above, you'll have to figure it out - # yourself - repo.ui.deprecwarn("changectx.__init__ is getting more limited, see " - "context.changectxdeprecwarn() for details", "4.6", - stacklevel=4) - class changectx(basectx): """A changecontext object makes access to data related to a particular changeset convenient. It represents a read-only context already present in @@ -415,22 +390,22 @@ class changectx(basectx): self._node = repo.changelog.node(changeid) self._rev = changeid return - if changeid == 'null': + elif changeid == 'null': self._node = nullid self._rev = nullrev return - if changeid == 'tip': + elif changeid == 'tip': self._node = repo.changelog.tip() self._rev = repo.changelog.rev(self._node) return - if (changeid == '.' - or repo.local() and changeid == repo.dirstate.p1()): + elif (changeid == '.' + or repo.local() and changeid == repo.dirstate.p1()): # this is a hack to delay/avoid loading obsmarkers # when we know that '.' 
won't be hidden self._node = repo.dirstate.p1() self._rev = repo.unfiltered().changelog.rev(self._node) return - if len(changeid) == 20: + elif len(changeid) == 20: try: self._node = changeid self._rev = repo.changelog.rev(changeid) @@ -438,27 +413,17 @@ class changectx(basectx): except error.FilteredLookupError: raise except LookupError: - pass + # check if it might have come from damaged dirstate + # + # XXX we could avoid the unfiltered if we had a recognizable + # exception for filtered changeset access + if (repo.local() + and changeid in repo.unfiltered().dirstate.parents()): + msg = _("working directory has unknown parent '%s'!") + raise error.Abort(msg % short(changeid)) + changeid = hex(changeid) # for the error message - try: - r = int(changeid) - if '%d' % r != changeid: - raise ValueError - l = len(repo.changelog) - if r < 0: - r += l - if r < 0 or r >= l and r != wdirrev: - raise ValueError - self._rev = r - self._node = repo.changelog.node(r) - changectxdeprecwarn(repo) - return - except error.FilteredIndexError: - raise - except (ValueError, OverflowError, IndexError): - pass - - if len(changeid) == 40: + elif len(changeid) == 40: try: self._node = bin(changeid) self._rev = repo.changelog.rev(self._node) @@ -467,39 +432,15 @@ class changectx(basectx): raise except (TypeError, LookupError): pass - - # lookup bookmarks through the name interface - try: - self._node = repo.names.singlenode(repo, changeid) - self._rev = repo.changelog.rev(self._node) - changectxdeprecwarn(repo) - return - except KeyError: - pass - - self._node = scmutil.resolvehexnodeidprefix(repo, changeid) - if self._node is not None: - self._rev = repo.changelog.rev(self._node) - changectxdeprecwarn(repo) - return + else: + raise error.ProgrammingError( + "unsupported changeid '%s' of type %s" % + (changeid, type(changeid))) # lookup failed - # check if it might have come from damaged dirstate - # - # XXX we could avoid the unfiltered if we had a recognizable - # exception for filtered changeset access - if (repo.local() - and changeid in repo.unfiltered().dirstate.parents()): - msg = _("working directory has unknown parent '%s'!") - raise error.Abort(msg % short(changeid)) - try: - if len(changeid) == 20 and nonascii(changeid): - changeid = hex(changeid) - except TypeError: - pass except (error.FilteredIndexError, error.FilteredLookupError): raise error.FilteredRepoLookupError(_("filtered revision '%s'") - % changeid) + % pycompat.bytestr(changeid)) except error.FilteredRepoLookupError: raise except IndexError: @@ -649,8 +590,14 @@ class changectx(basectx): return changectx(self._repo, anc) def descendant(self, other): - """True if other is descendant of this changeset""" - return self._repo.changelog.descendant(self._rev, other._rev) + msg = (b'ctx.descendant(other) is deprecated, ' + 'use ctx.isancestorof(other)') + self._repo.ui.deprecwarn(msg, b'4.7') + return self.isancestorof(other) + + def isancestorof(self, other): + """True if this changeset is an ancestor of other""" + return self._repo.changelog.isancestorrev(self._rev, other._rev) def walk(self, match): '''Generates matching file names.''' @@ -1294,7 +1241,8 @@ class committablectx(basectx): unknown=True, ignored=False)) def matches(self, match): - return sorted(self._repo.dirstate.matches(match)) + ds = self._repo.dirstate + return sorted(f for f in ds.matches(match) if ds[f] != 'r') def ancestors(self): for p in self._parents: @@ -1399,7 +1347,8 @@ class workingctx(committablectx): ui.warn(_("%s does not exist!\n") % uipath(f)) 
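descendant() above is kept as a shim that warns and forwards to the new isancestorof(). The same rename-with-deprecation pattern in generic form, with warnings.warn standing in for ui.deprecwarn:

import warnings

class Ctx(object):
    def __init__(self, rev, ancestors=()):
        self.rev = rev
        self._ancestors = set(ancestors)

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self.rev in other._ancestors

    def descendant(self, other):
        warnings.warn('ctx.descendant(other) is deprecated, '
                      'use ctx.isancestorof(other)', DeprecationWarning)
        return self.isancestorof(other)

a, b = Ctx(1), Ctx(2, ancestors=[1])
assert a.isancestorof(b)
assert a.descendant(b)    # old spelling still works, but warns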
rejected.append(f) continue - if st.st_size > 10000000: + limit = ui.configbytes('ui', 'large-file-limit') + if limit != 0 and st.st_size > limit: ui.warn(_("%s: up to %d MB of RAM may be required " "to manage this file\n" "(use 'hg revert %s' to cancel the " @@ -1773,7 +1722,9 @@ class workingfilectx(committablefilectx) def remove(self, ignoremissing=False): """wraps unlink for a repo's working directory""" - self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing) + rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs') + self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing, + rmdir=rmdir) def write(self, data, flags, backgroundclose=False, **kwargs): """wraps repo.wwrite""" diff --git a/mercurial/copies.py b/mercurial/copies.py --- a/mercurial/copies.py +++ b/mercurial/copies.py @@ -254,6 +254,11 @@ def _computenonoverlap(repo, c1, c2, add repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1))) if u2: repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2))) + + narrowmatch = repo.narrowmatch() + if not narrowmatch.always(): + u1 = [f for f in u1 if narrowmatch(f)] + u2 = [f for f in u2 if narrowmatch(f)] return u1, u2 def _makegetfctx(ctx): @@ -411,14 +416,14 @@ def _fullcopytracing(repo, c1, c2, base) # common ancestor or not without explicitly checking it, it's better to # determine that here. # - # base.descendant(wc) and base.descendant(base) are False, work around that + # base.isancestorof(wc) is False, work around that _c1 = c1.p1() if c1.rev() is None else c1 _c2 = c2.p1() if c2.rev() is None else c2 # an endpoint is "dirty" if it isn't a descendant of the merge base # if we have a dirty endpoint, we need to trigger graft logic, and also # keep track of which endpoint is dirty - dirtyc1 = not (base == _c1 or base.descendant(_c1)) - dirtyc2 = not (base == _c2 or base.descendant(_c2)) + dirtyc1 = not base.isancestorof(_c1) + dirtyc2 = not base.isancestorof(_c2) graft = dirtyc1 or dirtyc2 tca = base if graft: diff --git a/mercurial/crecord.py b/mercurial/crecord.py --- a/mercurial/crecord.py +++ b/mercurial/crecord.py @@ -65,6 +65,11 @@ except ImportError: # compiled with curses curses = False +class fallbackerror(error.Abort): + """Error that indicates the client should try to fallback to text mode.""" + # Inherits from error.Abort so that existing behavior is preserved if the + # calling code does not know how to fallback. 
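crecord's fallbackerror, defined just above, encodes graceful degradation in the exception hierarchy: new code catches the subclass and retries in text mode, while older callers that only know the base error keep aborting. A generic sketch of that contract:

class Abort(Exception):
    """stand-in for error.Abort"""

class FallbackError(Abort):
    """the client should retry in plain-text mode"""

def curses_ui():
    raise FallbackError('this diff is too large to be displayed')

def record():
    try:
        return curses_ui()
    except FallbackError:
        return 'text mode'   # downgrade instead of aborting
    # a caller with only `except Abort:` would still abort, unchanged

assert record() == 'text mode'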
+ def checkcurses(ui): """Return True if the user wants to use curses @@ -529,8 +534,8 @@ def chunkselector(ui, headerlist, operat origsigtstp = signal.getsignal(signal.SIGTSTP) try: curses.wrapper(chunkselector.main) - if chunkselector.initerr is not None: - raise error.Abort(chunkselector.initerr) + if chunkselector.initexc is not None: + raise chunkselector.initexc # ncurses does not restore signal handler for SIGTSTP finally: if origsigtstp is not sentinel: @@ -549,7 +554,7 @@ def testchunkselector(testfn, ui, header """ chunkselector = curseschunkselector(headerlist, ui, operation) if testfn and os.path.exists(testfn): - testf = open(testfn) + testf = open(testfn, 'rb') testcommands = [x.rstrip('\n') for x in testf.readlines()] testf.close() while True: @@ -666,6 +671,7 @@ class curseschunkselector(object): nextitem = currentitem self.currentselecteditem = nextitem + self.recenterdisplayedarea() def downarrowevent(self): """ @@ -705,6 +711,7 @@ class curseschunkselector(object): nextitem = currentitem self.currentselecteditem = nextitem + self.recenterdisplayedarea() def rightarrowevent(self): """ @@ -1718,7 +1725,7 @@ are you sure you want to review/edit and self.stdscr = stdscr # error during initialization, cannot be printed in the curses # interface, it should be printed by the calling code - self.initerr = None + self.initexc = None self.yscreensize, self.xscreensize = self.stdscr.getmaxyx() curses.start_color() @@ -1751,7 +1758,8 @@ are you sure you want to review/edit and try: self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize) except curses.error: - self.initerr = _('this diff is too large to be displayed') + self.initexc = fallbackerror( + _('this diff is too large to be displayed')) return # initialize selecteditemendline (initial start-line is 0) self.selecteditemendline = self.getnumlinesdisplayed( diff --git a/mercurial/debugcommands.py b/mercurial/debugcommands.py --- a/mercurial/debugcommands.py +++ b/mercurial/debugcommands.py @@ -21,7 +21,6 @@ import stat import string import subprocess import sys -import tempfile import time from .i18n import _ @@ -71,7 +70,6 @@ from . 
import ( scmutil, setdiscovery, simplemerge, - smartset, sshpeer, sslutil, streamclone, @@ -183,18 +181,14 @@ def debugbuilddag(ui, repo, text=None, initialmergedlines.append("") tags = [] - - wlock = lock = tr = None - try: - wlock = repo.wlock() - lock = repo.lock() - tr = repo.transaction("builddag") - + progress = ui.makeprogress(_('building'), unit=_('revisions'), + total=total) + with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"): at = -1 atbranch = 'default' nodeids = [] id = 0 - ui.progress(_('building'), id, unit=_('revisions'), total=total) + progress.update(id) for type, data in dagparser.parsedag(text): if type == 'n': ui.note(('node %s\n' % pycompat.bytestr(data))) @@ -267,14 +261,10 @@ def debugbuilddag(ui, repo, text=None, elif type == 'a': ui.note(('branch %s\n' % data)) atbranch = data - ui.progress(_('building'), id, unit=_('revisions'), total=total) - tr.close() + progress.update(id) if tags: repo.vfs.write("localtags", "".join(tags)) - finally: - ui.progress(_('building'), None) - release(tr, lock, wlock) def _debugchangegroup(ui, gen, all=None, indent=0, **opts): indent_string = ' ' * indent @@ -437,7 +427,7 @@ def debugcheckstate(ui, repo): 'hg debugcolor') def debugcolor(ui, repo, **opts): """show available color, effects or style""" - ui.write(('color mode: %s\n') % ui._colormode) + ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode)) if opts.get(r'style'): return _debugdisplaystyle(ui) else: @@ -630,6 +620,8 @@ def debugdeltachain(ui, repo, file_=None opts = pycompat.byteskwargs(opts) r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts) index = r.index + start = r.start + length = r.length generaldelta = r.version & revlog.FLAG_GENERALDELTA withsparseread = getattr(r, '_withsparseread', False) @@ -677,8 +669,6 @@ def debugdeltachain(ui, repo, file_=None comp, uncomp, deltatype, chain, chainsize = revinfo(rev) chainbase = chain[0] chainid = chainbases.setdefault(chainbase, len(chainbases) + 1) - start = r.start - length = r.length basestart = start(chainbase) revstart = start(rev) lineardist = revstart + comp - basestart @@ -688,8 +678,15 @@ def debugdeltachain(ui, repo, file_=None except IndexError: prevrev = -1 - chainratio = float(chainsize) / float(uncomp) - extraratio = float(extradist) / float(chainsize) + if uncomp != 0: + chainratio = float(chainsize) / float(uncomp) + else: + chainratio = chainsize + + if chainsize != 0: + extraratio = float(extradist) / float(chainsize) + else: + extraratio = extradist fm.startitem() fm.write('rev chainid chainlen prevrev deltatype compsize ' @@ -718,7 +715,10 @@ def debugdeltachain(ui, repo, file_=None if largestblock < blksize: largestblock = blksize - readdensity = float(chainsize) / float(readsize) + if readsize: + readdensity = float(chainsize) / float(readsize) + else: + readdensity = 1 fm.write('readsize largestblock readdensity srchunks', ' %10d %10d %9.5f %8d', @@ -838,8 +838,8 @@ def debugdownload(ui, repo, url, output= if output: dest.close() -@command('debugextensions', cmdutil.formatteropts, [], norepo=True) -def debugextensions(ui, **opts): +@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True) +def debugextensions(ui, repo, **opts): '''show information about active extensions''' opts = pycompat.byteskwargs(opts) exts = extensions.extensions(ui) @@ -885,16 +885,38 @@ def debugextensions(ui, **opts): fm.end() @command('debugfileset', - [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))], - _('[-r REV] FILESPEC')) + [('r', 'rev', 
'', _('apply the filespec on this revision'), _('REV')), + ('', 'all-files', False, + _('test files from all revisions and working directory'))], + _('[-r REV] [--all-files] FILESPEC')) def debugfileset(ui, repo, expr, **opts): '''parse and apply a fileset specification''' - ctx = scmutil.revsingle(repo, opts.get(r'rev'), None) + opts = pycompat.byteskwargs(opts) + ctx = scmutil.revsingle(repo, opts.get('rev'), None) if ui.verbose: tree = fileset.parse(expr) ui.note(fileset.prettyformat(tree), "\n") - for f in ctx.getfileset(expr): + files = set() + if opts['all_files']: + for r in repo: + c = repo[r] + files.update(c.files()) + files.update(c.substate) + if opts['all_files'] or ctx.rev() is None: + wctx = repo[None] + files.update(repo.dirstate.walk(scmutil.matchall(repo), + subrepos=list(wctx.substate), + unknown=True, ignored=True)) + files.update(wctx.substate) + else: + files.update(ctx.files()) + files.update(ctx.substate) + + m = ctx.matchfileset(expr) + for f in sorted(files): + if not m(f): + continue ui.write("%s\n" % f) @command('debugformat', @@ -971,7 +993,7 @@ def debugfsinfo(ui, path="."): ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no')) casesensitive = '(unknown)' try: - with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f: + with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f: casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no' except OSError: pass @@ -1143,7 +1165,7 @@ def debuginstall(ui, **opts): opts = pycompat.byteskwargs(opts) def writetemp(contents): - (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-") + (fd, name) = pycompat.mkstemp(prefix="hg-debuginstall-") f = os.fdopen(fd, r"wb") f.write(contents) f.close() @@ -1597,7 +1619,7 @@ def debugobsolete(ui, repo, precursor=No if opts['rev']: raise error.Abort('cannot select revision when creating marker') metadata = {} - metadata['user'] = opts['user'] or ui.username() + metadata['user'] = encoding.fromlocal(opts['user'] or ui.username()) succs = tuple(parsenodeid(succ) for succ in successors) l = repo.lock() try: @@ -2237,8 +2259,8 @@ def debugrevspec(ui, repo, expr, **opts) arevs = revset.makematcher(treebystage['analyzed'])(repo) brevs = revset.makematcher(treebystage['optimized'])(repo) if opts['show_set'] or (opts['show_set'] is None and ui.verbose): - ui.write(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n") - ui.write(("* optimized set:\n"), smartset.prettyformat(brevs), "\n") + ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n") + ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n") arevs = list(arevs) brevs = list(brevs) if arevs == brevs: @@ -2261,7 +2283,7 @@ def debugrevspec(ui, repo, expr, **opts) func = revset.makematcher(tree) revs = func(repo) if opts['show_set'] or (opts['show_set'] is None and ui.verbose): - ui.write(("* set:\n"), smartset.prettyformat(revs), "\n") + ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n") if not opts['show_revs']: return for c in revs: @@ -2291,7 +2313,13 @@ def debugserve(ui, repo, **opts): if opts['logiofd']: # Line buffered because output is line based. 
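The logiofd hunk just below exists because append mode needs seek() and a pipe cannot seek, so Python 3 can refuse 'ab' with ESPIPE. A self-contained demonstration of the guarded fallback (whether the first fdopen actually raises depends on the Python version, which is exactly why the error is checked rather than assumed):

import errno
import os

r, w = os.pipe()
try:
    fh = os.fdopen(w, 'ab')        # append mode wants to seek to EOF
except OSError as e:
    if e.errno != errno.ESPIPE:    # only swallow "illegal seek"
        raise
    fh = os.fdopen(w, 'wb')        # plain write mode needs no seek
fh.write(b'log line\n')
fh.flush()
assert os.read(r, 9) == b'log line\n'
fh.close()
os.close(r)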
- logfh = os.fdopen(int(opts['logiofd']), r'ab', 1) + try: + logfh = os.fdopen(int(opts['logiofd']), r'ab', 1) + except OSError as e: + if e.errno != errno.ESPIPE: + raise + # can't seek a pipe, so `ab` mode fails on py3 + logfh = os.fdopen(int(opts['logiofd']), r'wb', 1) elif opts['logiofile']: logfh = open(opts['logiofile'], 'ab', 1) @@ -2484,9 +2512,17 @@ def debugtemplate(ui, repo, tmpl, **opts if revs is None: tres = formatter.templateresources(ui, repo) t = formatter.maketemplater(ui, tmpl, resources=tres) + if ui.verbose: + kwds, funcs = t.symbolsuseddefault() + ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds))) + ui.write(("* functions: %s\n") % ', '.join(sorted(funcs))) ui.write(t.renderdefault(props)) else: displayer = logcmdutil.maketemplater(ui, repo, tmpl) + if ui.verbose: + kwds, funcs = displayer.t.symbolsuseddefault() + ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds))) + ui.write(("* functions: %s\n") % ', '.join(sorted(funcs))) for r in revs: displayer.show(repo[r], **pycompat.strkwargs(props)) displayer.close() @@ -2544,7 +2580,8 @@ def debugwalk(ui, repo, *pats, **opts): """show how files match on given patterns""" opts = pycompat.byteskwargs(opts) m = scmutil.match(repo[None], pats, opts) - ui.write(('matcher: %r\n' % m)) + if ui.verbose: + ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n') items = list(repo[None].walk(m)) if not items: return @@ -3018,10 +3055,12 @@ def debugwireproto(ui, repo, path=None, if isinstance(res, wireprotov2peer.commandresponse): val = list(res.cborobjects()) - ui.status(_('response: %s\n') % stringutil.pprint(val)) + ui.status(_('response: %s\n') % + stringutil.pprint(val, bprefix=True)) else: - ui.status(_('response: %s\n') % stringutil.pprint(res)) + ui.status(_('response: %s\n') % + stringutil.pprint(res, bprefix=True)) elif action == 'batchbegin': if batchedcommands is not None: @@ -3093,7 +3132,8 @@ def debugwireproto(ui, repo, path=None, continue if res.headers.get('Content-Type') == 'application/mercurial-cbor': - ui.write(_('cbor> %s\n') % stringutil.pprint(cbor.loads(body))) + ui.write(_('cbor> %s\n') % + stringutil.pprint(cbor.loads(body), bprefix=True)) elif action == 'close': peer.close() diff --git a/mercurial/patch.py b/mercurial/diffutil.py copy from mercurial/patch.py copy to mercurial/diffutil.py --- a/mercurial/patch.py +++ b/mercurial/diffutil.py @@ -1,2243 +1,26 @@ -# patch.py - patch file parsing routines +# diffutil.py - utility functions related to diff and patch # # Copyright 2006 Brendan Cully # Copyright 2007 Chris Mason +# Copyright 2018 Octobus # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -from __future__ import absolute_import, print_function - -import collections -import contextlib -import copy -import email -import errno -import hashlib -import os -import posixpath -import re -import shutil -import tempfile -import zlib +from __future__ import absolute_import from .i18n import _ -from .node import ( - hex, - short, -) + from . 
import ( - copies, - diffhelper, - encoding, - error, - mail, mdiff, - pathutil, pycompat, - scmutil, - similar, - util, - vfs as vfsmod, -) -from .utils import ( - dateutil, - procutil, - stringutil, ) -stringio = util.stringio - -gitre = re.compile(br'diff --git a/(.*) b/(.*)') -tabsplitter = re.compile(br'(\t+|[^\t]+)') -wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|' - '[^ \ta-zA-Z0-9_\x80-\xff])') - -PatchError = error.PatchError - -# public functions - -def split(stream): - '''return an iterator of individual patches from a stream''' - def isheader(line, inheader): - if inheader and line.startswith((' ', '\t')): - # continuation - return True - if line.startswith((' ', '-', '+')): - # diff line - don't check for header pattern in there - return False - l = line.split(': ', 1) - return len(l) == 2 and ' ' not in l[0] - - def chunk(lines): - return stringio(''.join(lines)) - - def hgsplit(stream, cur): - inheader = True - - for line in stream: - if not line.strip(): - inheader = False - if not inheader and line.startswith('# HG changeset patch'): - yield chunk(cur) - cur = [] - inheader = True - - cur.append(line) - - if cur: - yield chunk(cur) - - def mboxsplit(stream, cur): - for line in stream: - if line.startswith('From '): - for c in split(chunk(cur[1:])): - yield c - cur = [] - - cur.append(line) - - if cur: - for c in split(chunk(cur[1:])): - yield c - - def mimesplit(stream, cur): - def msgfp(m): - fp = stringio() - g = email.Generator.Generator(fp, mangle_from_=False) - g.flatten(m) - fp.seek(0) - return fp - - for line in stream: - cur.append(line) - c = chunk(cur) - - m = pycompat.emailparser().parse(c) - if not m.is_multipart(): - yield msgfp(m) - else: - ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') - for part in m.walk(): - ct = part.get_content_type() - if ct not in ok_types: - continue - yield msgfp(part) - - def headersplit(stream, cur): - inheader = False - - for line in stream: - if not inheader and isheader(line, inheader): - yield chunk(cur) - cur = [] - inheader = True - if inheader and not isheader(line, inheader): - inheader = False - - cur.append(line) - - if cur: - yield chunk(cur) - - def remainder(cur): - yield chunk(cur) - - class fiter(object): - def __init__(self, fp): - self.fp = fp - - def __iter__(self): - return self - - def next(self): - l = self.fp.readline() - if not l: - raise StopIteration - return l - - __next__ = next - - inheader = False - cur = [] - - mimeheaders = ['content-type'] - - if not util.safehasattr(stream, 'next'): - # http responses, for example, have readline but not next - stream = fiter(stream) - - for line in stream: - cur.append(line) - if line.startswith('# HG changeset patch'): - return hgsplit(stream, cur) - elif line.startswith('From '): - return mboxsplit(stream, cur) - elif isheader(line, inheader): - inheader = True - if line.split(':', 1)[0].lower() in mimeheaders: - # let email parser handle this - return mimesplit(stream, cur) - elif line.startswith('--- ') and inheader: - # No evil headers seen by diff start, split by hand - return headersplit(stream, cur) - # Not enough info, keep reading - - # if we are here, we have a very plain patch - return remainder(cur) - -## Some facility for extensible patch parsing: -# list of pairs ("header to match", "data key") -patchheadermap = [('Date', 'date'), - ('Branch', 'branch'), - ('Node ID', 'nodeid'), - ] - -@contextlib.contextmanager -def extract(ui, fileobj): - '''extract patch from data read from fileobj. 
- - patch can be a normal patch or contained in an email message. - - return a dictionary. Standard keys are: - - filename, - - message, - - user, - - date, - - branch, - - node, - - p1, - - p2. - Any item can be missing from the dictionary. If filename is missing, - fileobj did not contain a patch. Caller must unlink filename when done.''' - - fd, tmpname = tempfile.mkstemp(prefix='hg-patch-') - tmpfp = os.fdopen(fd, r'wb') - try: - yield _extract(ui, fileobj, tmpname, tmpfp) - finally: - tmpfp.close() - os.unlink(tmpname) - -def _extract(ui, fileobj, tmpname, tmpfp): - - # attempt to detect the start of a patch - # (this heuristic is borrowed from quilt) - diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |' - br'retrieving revision [0-9]+(\.[0-9]+)*$|' - br'---[ \t].*?^\+\+\+[ \t]|' - br'\*\*\*[ \t].*?^---[ \t])', - re.MULTILINE | re.DOTALL) - - data = {} - - msg = pycompat.emailparser().parse(fileobj) - - subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject']) - data['user'] = msg[r'From'] and mail.headdecode(msg[r'From']) - if not subject and not data['user']: - # Not an email, restore parsed headers if any - subject = '\n'.join(': '.join(map(encoding.strtolocal, h)) - for h in msg.items()) + '\n' - - # should try to parse msg['Date'] - parents = [] - - if subject: - if subject.startswith('[PATCH'): - pend = subject.find(']') - if pend >= 0: - subject = subject[pend + 1:].lstrip() - subject = re.sub(br'\n[ \t]+', ' ', subject) - ui.debug('Subject: %s\n' % subject) - if data['user']: - ui.debug('From: %s\n' % data['user']) - diffs_seen = 0 - ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') - message = '' - for part in msg.walk(): - content_type = pycompat.bytestr(part.get_content_type()) - ui.debug('Content-Type: %s\n' % content_type) - if content_type not in ok_types: - continue - payload = part.get_payload(decode=True) - m = diffre.search(payload) - if m: - hgpatch = False - hgpatchheader = False - ignoretext = False - - ui.debug('found patch at byte %d\n' % m.start(0)) - diffs_seen += 1 - cfp = stringio() - for line in payload[:m.start(0)].splitlines(): - if line.startswith('# HG changeset patch') and not hgpatch: - ui.debug('patch generated by hg export\n') - hgpatch = True - hgpatchheader = True - # drop earlier commit message content - cfp.seek(0) - cfp.truncate() - subject = None - elif hgpatchheader: - if line.startswith('# User '): - data['user'] = line[7:] - ui.debug('From: %s\n' % data['user']) - elif line.startswith("# Parent "): - parents.append(line[9:].lstrip()) - elif line.startswith("# "): - for header, key in patchheadermap: - prefix = '# %s ' % header - if line.startswith(prefix): - data[key] = line[len(prefix):] - else: - hgpatchheader = False - elif line == '---': - ignoretext = True - if not hgpatchheader and not ignoretext: - cfp.write(line) - cfp.write('\n') - message = cfp.getvalue() - if tmpfp: - tmpfp.write(payload) - if not payload.endswith('\n'): - tmpfp.write('\n') - elif not diffs_seen and message and content_type == 'text/plain': - message += '\n' + payload - - if subject and not message.startswith(subject): - message = '%s\n%s' % (subject, message) - data['message'] = message - tmpfp.close() - if parents: - data['p1'] = parents.pop(0) - if parents: - data['p2'] = parents.pop(0) - - if diffs_seen: - data['filename'] = tmpname - - return data - -class patchmeta(object): - """Patched file metadata - - 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY - or COPY. 'path' is patched file path. 
'oldpath' is set to the
-    origin file when 'op' is either COPY or RENAME, None otherwise. If
-    file mode is changed, 'mode' is a tuple (islink, isexec) where
-    'islink' is True if the file is a symlink and 'isexec' is True if
-    the file is executable. Otherwise, 'mode' is None.
-    """
-    def __init__(self, path):
-        self.path = path
-        self.oldpath = None
-        self.mode = None
-        self.op = 'MODIFY'
-        self.binary = False
-
-    def setmode(self, mode):
-        islink = mode & 0o20000
-        isexec = mode & 0o100
-        self.mode = (islink, isexec)
-
-    def copy(self):
-        other = patchmeta(self.path)
-        other.oldpath = self.oldpath
-        other.mode = self.mode
-        other.op = self.op
-        other.binary = self.binary
-        return other
-
-    def _ispatchinga(self, afile):
-        if afile == '/dev/null':
-            return self.op == 'ADD'
-        return afile == 'a/' + (self.oldpath or self.path)
-
-    def _ispatchingb(self, bfile):
-        if bfile == '/dev/null':
-            return self.op == 'DELETE'
-        return bfile == 'b/' + self.path
-
-    def ispatching(self, afile, bfile):
-        return self._ispatchinga(afile) and self._ispatchingb(bfile)
-
-    def __repr__(self):
-        return "<patchmeta %s %r>" % (self.op, self.path)
-
-def readgitpatch(lr):
-    """extract git-style metadata about patches from <patchname>"""
-
-    # Filter patch for git information
-    gp = None
-    gitpatches = []
-    for line in lr:
-        line = line.rstrip(' \r\n')
-        if line.startswith('diff --git a/'):
-            m = gitre.match(line)
-            if m:
-                if gp:
-                    gitpatches.append(gp)
-                dst = m.group(2)
-                gp = patchmeta(dst)
-        elif gp:
-            if line.startswith('--- '):
-                gitpatches.append(gp)
-                gp = None
-                continue
-            if line.startswith('rename from '):
-                gp.op = 'RENAME'
-                gp.oldpath = line[12:]
-            elif line.startswith('rename to '):
-                gp.path = line[10:]
-            elif line.startswith('copy from '):
-                gp.op = 'COPY'
-                gp.oldpath = line[10:]
-            elif line.startswith('copy to '):
-                gp.path = line[8:]
-            elif line.startswith('deleted file'):
-                gp.op = 'DELETE'
-            elif line.startswith('new file mode '):
-                gp.op = 'ADD'
-                gp.setmode(int(line[-6:], 8))
-            elif line.startswith('new mode '):
-                gp.setmode(int(line[-6:], 8))
-            elif line.startswith('GIT binary patch'):
-                gp.binary = True
-    if gp:
-        gitpatches.append(gp)
-
-    return gitpatches
-
-class linereader(object):
-    # simple class to allow pushing lines back into the input stream
-    def __init__(self, fp):
-        self.fp = fp
-        self.buf = []
-
-    def push(self, line):
-        if line is not None:
-            self.buf.append(line)
-
-    def readline(self):
-        if self.buf:
-            l = self.buf[0]
-            del self.buf[0]
-            return l
-        return self.fp.readline()
-
-    def __iter__(self):
-        return iter(self.readline, '')
-
-class abstractbackend(object):
-    def __init__(self, ui):
-        self.ui = ui
-
-    def getfile(self, fname):
-        """Return target file data and flags as a (data, (islink,
-        isexec)) tuple. Data is None if file is missing/deleted.
-        """
-        raise NotImplementedError
-
-    def setfile(self, fname, data, mode, copysource):
-        """Write data to target file fname and set its mode. mode is a
-        (islink, isexec) tuple. If data is None, the file content should
-        be left unchanged. If the file is modified after being copied,
-        copysource is set to the original file name.
-        """
-        raise NotImplementedError
-
-    def unlink(self, fname):
-        """Unlink target file."""
-        raise NotImplementedError
-
-    def writerej(self, fname, failed, total, lines):
-        """Write rejected lines for fname. failed is the number of hunks
-        which failed to apply and total the total number of hunks for this
-        file.
- """ - - def exists(self, fname): - raise NotImplementedError - - def close(self): - raise NotImplementedError - -class fsbackend(abstractbackend): - def __init__(self, ui, basedir): - super(fsbackend, self).__init__(ui) - self.opener = vfsmod.vfs(basedir) - - def getfile(self, fname): - if self.opener.islink(fname): - return (self.opener.readlink(fname), (True, False)) - - isexec = False - try: - isexec = self.opener.lstat(fname).st_mode & 0o100 != 0 - except OSError as e: - if e.errno != errno.ENOENT: - raise - try: - return (self.opener.read(fname), (False, isexec)) - except IOError as e: - if e.errno != errno.ENOENT: - raise - return None, None - - def setfile(self, fname, data, mode, copysource): - islink, isexec = mode - if data is None: - self.opener.setflags(fname, islink, isexec) - return - if islink: - self.opener.symlink(data, fname) - else: - self.opener.write(fname, data) - if isexec: - self.opener.setflags(fname, False, True) - - def unlink(self, fname): - self.opener.unlinkpath(fname, ignoremissing=True) - - def writerej(self, fname, failed, total, lines): - fname = fname + ".rej" - self.ui.warn( - _("%d out of %d hunks FAILED -- saving rejects to file %s\n") % - (failed, total, fname)) - fp = self.opener(fname, 'w') - fp.writelines(lines) - fp.close() - - def exists(self, fname): - return self.opener.lexists(fname) - -class workingbackend(fsbackend): - def __init__(self, ui, repo, similarity): - super(workingbackend, self).__init__(ui, repo.root) - self.repo = repo - self.similarity = similarity - self.removed = set() - self.changed = set() - self.copied = [] - - def _checkknown(self, fname): - if self.repo.dirstate[fname] == '?' and self.exists(fname): - raise PatchError(_('cannot patch %s: file is not tracked') % fname) - - def setfile(self, fname, data, mode, copysource): - self._checkknown(fname) - super(workingbackend, self).setfile(fname, data, mode, copysource) - if copysource is not None: - self.copied.append((copysource, fname)) - self.changed.add(fname) - - def unlink(self, fname): - self._checkknown(fname) - super(workingbackend, self).unlink(fname) - self.removed.add(fname) - self.changed.add(fname) - - def close(self): - wctx = self.repo[None] - changed = set(self.changed) - for src, dst in self.copied: - scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst) - if self.removed: - wctx.forget(sorted(self.removed)) - for f in self.removed: - if f not in self.repo.dirstate: - # File was deleted and no longer belongs to the - # dirstate, it was probably marked added then - # deleted, and should not be considered by - # marktouched(). 
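# Illustrative sketch, not part of this changeset: the (islink, isexec)
# mode tuple the backends above pass around, decoded from a git file
# mode the same way patchmeta.setmode() does (0o20000 symlink bit,
# 0o100 owner-exec bit). 'decodemode' is a hypothetical helper name.
def decodemode(gitmode):
    return (bool(gitmode & 0o20000), bool(gitmode & 0o100))

assert decodemode(0o100755) == (False, True)   # executable regular file
assert decodemode(0o120000) == (True, False)   # symlink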
- changed.discard(f) - if changed: - scmutil.marktouched(self.repo, changed, self.similarity) - return sorted(self.changed) - -class filestore(object): - def __init__(self, maxsize=None): - self.opener = None - self.files = {} - self.created = 0 - self.maxsize = maxsize - if self.maxsize is None: - self.maxsize = 4*(2**20) - self.size = 0 - self.data = {} - - def setfile(self, fname, data, mode, copied=None): - if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize: - self.data[fname] = (data, mode, copied) - self.size += len(data) - else: - if self.opener is None: - root = tempfile.mkdtemp(prefix='hg-patch-') - self.opener = vfsmod.vfs(root) - # Avoid filename issues with these simple names - fn = '%d' % self.created - self.opener.write(fn, data) - self.created += 1 - self.files[fname] = (fn, mode, copied) - - def getfile(self, fname): - if fname in self.data: - return self.data[fname] - if not self.opener or fname not in self.files: - return None, None, None - fn, mode, copied = self.files[fname] - return self.opener.read(fn), mode, copied - - def close(self): - if self.opener: - shutil.rmtree(self.opener.base) - -class repobackend(abstractbackend): - def __init__(self, ui, repo, ctx, store): - super(repobackend, self).__init__(ui) - self.repo = repo - self.ctx = ctx - self.store = store - self.changed = set() - self.removed = set() - self.copied = {} - - def _checkknown(self, fname): - if fname not in self.ctx: - raise PatchError(_('cannot patch %s: file is not tracked') % fname) - - def getfile(self, fname): - try: - fctx = self.ctx[fname] - except error.LookupError: - return None, None - flags = fctx.flags() - return fctx.data(), ('l' in flags, 'x' in flags) - - def setfile(self, fname, data, mode, copysource): - if copysource: - self._checkknown(copysource) - if data is None: - data = self.ctx[fname].data() - self.store.setfile(fname, data, mode, copysource) - self.changed.add(fname) - if copysource: - self.copied[fname] = copysource - - def unlink(self, fname): - self._checkknown(fname) - self.removed.add(fname) - - def exists(self, fname): - return fname in self.ctx - - def close(self): - return self.changed | self.removed - -# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1 -unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@') -contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? 
(?:---|\*\*\*)') -eolmodes = ['strict', 'crlf', 'lf', 'auto'] - -class patchfile(object): - def __init__(self, ui, gp, backend, store, eolmode='strict'): - self.fname = gp.path - self.eolmode = eolmode - self.eol = None - self.backend = backend - self.ui = ui - self.lines = [] - self.exists = False - self.missing = True - self.mode = gp.mode - self.copysource = gp.oldpath - self.create = gp.op in ('ADD', 'COPY', 'RENAME') - self.remove = gp.op == 'DELETE' - if self.copysource is None: - data, mode = backend.getfile(self.fname) - else: - data, mode = store.getfile(self.copysource)[:2] - if data is not None: - self.exists = self.copysource is None or backend.exists(self.fname) - self.missing = False - if data: - self.lines = mdiff.splitnewlines(data) - if self.mode is None: - self.mode = mode - if self.lines: - # Normalize line endings - if self.lines[0].endswith('\r\n'): - self.eol = '\r\n' - elif self.lines[0].endswith('\n'): - self.eol = '\n' - if eolmode != 'strict': - nlines = [] - for l in self.lines: - if l.endswith('\r\n'): - l = l[:-2] + '\n' - nlines.append(l) - self.lines = nlines - else: - if self.create: - self.missing = False - if self.mode is None: - self.mode = (False, False) - if self.missing: - self.ui.warn(_("unable to find '%s' for patching\n") % self.fname) - self.ui.warn(_("(use '--prefix' to apply patch relative to the " - "current directory)\n")) - - self.hash = {} - self.dirty = 0 - self.offset = 0 - self.skew = 0 - self.rej = [] - self.fileprinted = False - self.printfile(False) - self.hunks = 0 - - def writelines(self, fname, lines, mode): - if self.eolmode == 'auto': - eol = self.eol - elif self.eolmode == 'crlf': - eol = '\r\n' - else: - eol = '\n' - - if self.eolmode != 'strict' and eol and eol != '\n': - rawlines = [] - for l in lines: - if l and l[-1] == '\n': - l = l[:-1] + eol - rawlines.append(l) - lines = rawlines - - self.backend.setfile(fname, ''.join(lines), mode, self.copysource) - - def printfile(self, warn): - if self.fileprinted: - return - if warn or self.ui.verbose: - self.fileprinted = True - s = _("patching file %s\n") % self.fname - if warn: - self.ui.warn(s) - else: - self.ui.note(s) - - - def findlines(self, l, linenum): - # looks through the hash and finds candidate lines. The - # result is a list of line numbers sorted based on distance - # from linenum - - cand = self.hash.get(l, []) - if len(cand) > 1: - # resort our list of potentials forward then back. - cand.sort(key=lambda x: abs(x - linenum)) - return cand - - def write_rej(self): - # our rejects are a little different from patch(1). This always - # creates rejects in the same form as the original patch. A file - # header is inserted so that you can run the reject through patch again - # without having to type the filename. 
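# Illustrative sketch, not part of this changeset: the reject-file
# layout the comment above describes. A bare ---/+++ header naming the
# file is prepended so the .rej can be fed straight back to patch(1)
# without retyping the filename; 'buildrej' and its arguments are
# hypothetical names.
import os

def buildrej(fname, failedhunks):
    base = os.path.basename(fname)
    lines = ["--- %s\n+++ %s\n" % (base, base)]
    for hunk in failedhunks:          # each hunk is a list of raw lines
        lines.extend(hunk)
        if hunk and not hunk[-1].endswith('\n'):
            lines.append("\n\\ No newline at end of file\n")
    return ''.join(lines)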
- if not self.rej: - return - base = os.path.basename(self.fname) - lines = ["--- %s\n+++ %s\n" % (base, base)] - for x in self.rej: - for l in x.hunk: - lines.append(l) - if l[-1:] != '\n': - lines.append("\n\ No newline at end of file\n") - self.backend.writerej(self.fname, len(self.rej), self.hunks, lines) - - def apply(self, h): - if not h.complete(): - raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") % - (h.number, h.desc, len(h.a), h.lena, len(h.b), - h.lenb)) - - self.hunks += 1 - - if self.missing: - self.rej.append(h) - return -1 - - if self.exists and self.create: - if self.copysource: - self.ui.warn(_("cannot create %s: destination already " - "exists\n") % self.fname) - else: - self.ui.warn(_("file %s already exists\n") % self.fname) - self.rej.append(h) - return -1 - - if isinstance(h, binhunk): - if self.remove: - self.backend.unlink(self.fname) - else: - l = h.new(self.lines) - self.lines[:] = l - self.offset += len(l) - self.dirty = True - return 0 - - horig = h - if (self.eolmode in ('crlf', 'lf') - or self.eolmode == 'auto' and self.eol): - # If new eols are going to be normalized, then normalize - # hunk data before patching. Otherwise, preserve input - # line-endings. - h = h.getnormalized() - - # fast case first, no offsets, no fuzz - old, oldstart, new, newstart = h.fuzzit(0, False) - oldstart += self.offset - orig_start = oldstart - # if there's skew we want to emit the "(offset %d lines)" even - # when the hunk cleanly applies at start + skew, so skip the - # fast case code - if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart): - if self.remove: - self.backend.unlink(self.fname) - else: - self.lines[oldstart:oldstart + len(old)] = new - self.offset += len(new) - len(old) - self.dirty = True - return 0 - - # ok, we couldn't match the hunk. 
Lets look for offsets and fuzz it - self.hash = {} - for x, s in enumerate(self.lines): - self.hash.setdefault(s, []).append(x) - - for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1): - for toponly in [True, False]: - old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly) - oldstart = oldstart + self.offset + self.skew - oldstart = min(oldstart, len(self.lines)) - if old: - cand = self.findlines(old[0][1:], oldstart) - else: - # Only adding lines with no or fuzzed context, just - # take the skew in account - cand = [oldstart] - - for l in cand: - if not old or diffhelper.testhunk(old, self.lines, l): - self.lines[l : l + len(old)] = new - self.offset += len(new) - len(old) - self.skew = l - orig_start - self.dirty = True - offset = l - orig_start - fuzzlen - if fuzzlen: - msg = _("Hunk #%d succeeded at %d " - "with fuzz %d " - "(offset %d lines).\n") - self.printfile(True) - self.ui.warn(msg % - (h.number, l + 1, fuzzlen, offset)) - else: - msg = _("Hunk #%d succeeded at %d " - "(offset %d lines).\n") - self.ui.note(msg % (h.number, l + 1, offset)) - return fuzzlen - self.printfile(True) - self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start)) - self.rej.append(horig) - return -1 - - def close(self): - if self.dirty: - self.writelines(self.fname, self.lines, self.mode) - self.write_rej() - return len(self.rej) - -class header(object): - """patch header - """ - diffgit_re = re.compile('diff --git a/(.*) b/(.*)$') - diff_re = re.compile('diff -r .* (.*)$') - allhunks_re = re.compile('(?:index|deleted file) ') - pretty_re = re.compile('(?:new file|deleted file) ') - special_re = re.compile('(?:index|deleted|copy|rename) ') - newfile_re = re.compile('(?:new file)') - - def __init__(self, header): - self.header = header - self.hunks = [] - - def binary(self): - return any(h.startswith('index ') for h in self.header) - - def pretty(self, fp): - for h in self.header: - if h.startswith('index '): - fp.write(_('this modifies a binary file (all or nothing)\n')) - break - if self.pretty_re.match(h): - fp.write(h) - if self.binary(): - fp.write(_('this is a binary file\n')) - break - if h.startswith('---'): - fp.write(_('%d hunks, %d lines changed\n') % - (len(self.hunks), - sum([max(h.added, h.removed) for h in self.hunks]))) - break - fp.write(h) - - def write(self, fp): - fp.write(''.join(self.header)) - - def allhunks(self): - return any(self.allhunks_re.match(h) for h in self.header) - - def files(self): - match = self.diffgit_re.match(self.header[0]) - if match: - fromfile, tofile = match.groups() - if fromfile == tofile: - return [fromfile] - return [fromfile, tofile] - else: - return self.diff_re.match(self.header[0]).groups() - - def filename(self): - return self.files()[-1] - - def __repr__(self): - return '
' % (' '.join(map(repr, self.files()))) - - def isnewfile(self): - return any(self.newfile_re.match(h) for h in self.header) - - def special(self): - # Special files are shown only at the header level and not at the hunk - # level for example a file that has been deleted is a special file. - # The user cannot change the content of the operation, in the case of - # the deleted file he has to take the deletion or not take it, he - # cannot take some of it. - # Newly added files are special if they are empty, they are not special - # if they have some content as we want to be able to change it - nocontent = len(self.header) == 2 - emptynewfile = self.isnewfile() and nocontent - return emptynewfile or \ - any(self.special_re.match(h) for h in self.header) - -class recordhunk(object): - """patch hunk - - XXX shouldn't we merge this with the other hunk class? - """ - - def __init__(self, header, fromline, toline, proc, before, hunk, after, - maxcontext=None): - def trimcontext(lines, reverse=False): - if maxcontext is not None: - delta = len(lines) - maxcontext - if delta > 0: - if reverse: - return delta, lines[delta:] - else: - return delta, lines[:maxcontext] - return 0, lines - - self.header = header - trimedbefore, self.before = trimcontext(before, True) - self.fromline = fromline + trimedbefore - self.toline = toline + trimedbefore - _trimedafter, self.after = trimcontext(after, False) - self.proc = proc - self.hunk = hunk - self.added, self.removed = self.countchanges(self.hunk) - - def __eq__(self, v): - if not isinstance(v, recordhunk): - return False - - return ((v.hunk == self.hunk) and - (v.proc == self.proc) and - (self.fromline == v.fromline) and - (self.header.files() == v.header.files())) - - def __hash__(self): - return hash((tuple(self.hunk), - tuple(self.header.files()), - self.fromline, - self.proc)) - - def countchanges(self, hunk): - """hunk -> (n+,n-)""" - add = len([h for h in hunk if h.startswith('+')]) - rem = len([h for h in hunk if h.startswith('-')]) - return add, rem - - def reversehunk(self): - """return another recordhunk which is the reverse of the hunk - - If this hunk is diff(A, B), the returned hunk is diff(B, A). To do - that, swap fromline/toline and +/- signs while keep other things - unchanged. 
- """ - m = {'+': '-', '-': '+', '\\': '\\'} - hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk] - return recordhunk(self.header, self.toline, self.fromline, self.proc, - self.before, hunk, self.after) - - def write(self, fp): - delta = len(self.before) + len(self.after) - if self.after and self.after[-1] == '\\ No newline at end of file\n': - delta -= 1 - fromlen = delta + self.removed - tolen = delta + self.added - fp.write('@@ -%d,%d +%d,%d @@%s\n' % - (self.fromline, fromlen, self.toline, tolen, - self.proc and (' ' + self.proc))) - fp.write(''.join(self.before + self.hunk + self.after)) - - pretty = write - - def filename(self): - return self.header.filename() - - def __repr__(self): - return '' % (self.filename(), self.fromline) - -def getmessages(): - return { - 'multiple': { - 'apply': _("apply change %d/%d to '%s'?"), - 'discard': _("discard change %d/%d to '%s'?"), - 'record': _("record change %d/%d to '%s'?"), - }, - 'single': { - 'apply': _("apply this change to '%s'?"), - 'discard': _("discard this change to '%s'?"), - 'record': _("record this change to '%s'?"), - }, - 'help': { - 'apply': _('[Ynesfdaq?]' - '$$ &Yes, apply this change' - '$$ &No, skip this change' - '$$ &Edit this change manually' - '$$ &Skip remaining changes to this file' - '$$ Apply remaining changes to this &file' - '$$ &Done, skip remaining changes and files' - '$$ Apply &all changes to all remaining files' - '$$ &Quit, applying no changes' - '$$ &? (display help)'), - 'discard': _('[Ynesfdaq?]' - '$$ &Yes, discard this change' - '$$ &No, skip this change' - '$$ &Edit this change manually' - '$$ &Skip remaining changes to this file' - '$$ Discard remaining changes to this &file' - '$$ &Done, skip remaining changes and files' - '$$ Discard &all changes to all remaining files' - '$$ &Quit, discarding no changes' - '$$ &? (display help)'), - 'record': _('[Ynesfdaq?]' - '$$ &Yes, record this change' - '$$ &No, skip this change' - '$$ &Edit this change manually' - '$$ &Skip remaining changes to this file' - '$$ Record remaining changes to this &file' - '$$ &Done, skip remaining changes and files' - '$$ Record &all changes to all remaining files' - '$$ &Quit, recording no changes' - '$$ &? (display help)'), - } - } - -def filterpatch(ui, headers, operation=None): - """Interactively filter patch chunks into applied-only chunks""" - messages = getmessages() - - if operation is None: - operation = 'record' - - def prompt(skipfile, skipall, query, chunk): - """prompt query, and process base inputs - - - y/n for the rest of file - - y/n for the rest - - ? (help) - - q (quit) - - Return True/False and possibly updated skipfile and skipall. - """ - newpatches = None - if skipall is not None: - return skipall, skipfile, skipall, newpatches - if skipfile is not None: - return skipfile, skipfile, skipall, newpatches - while True: - resps = messages['help'][operation] - r = ui.promptchoice("%s %s" % (query, resps)) - ui.write("\n") - if r == 8: # ? 
- for c, t in ui.extractchoices(resps)[1]: - ui.write('%s - %s\n' % (c, encoding.lower(t))) - continue - elif r == 0: # yes - ret = True - elif r == 1: # no - ret = False - elif r == 2: # Edit patch - if chunk is None: - ui.write(_('cannot edit patch for whole file')) - ui.write("\n") - continue - if chunk.header.binary(): - ui.write(_('cannot edit patch for binary file')) - ui.write("\n") - continue - # Patch comment based on the Git one (based on comment at end of - # https://mercurial-scm.org/wiki/RecordExtension) - phelp = '---' + _(""" -To remove '-' lines, make them ' ' lines (context). -To remove '+' lines, delete them. -Lines starting with # will be removed from the patch. - -If the patch applies cleanly, the edited hunk will immediately be -added to the record list. If it does not apply cleanly, a rejects -file will be generated: you can use that when you try again. If -all lines of the hunk are removed, then the edit is aborted and -the hunk is left unchanged. -""") - (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-", - suffix=".diff") - ncpatchfp = None - try: - # Write the initial patch - f = util.nativeeolwriter(os.fdopen(patchfd, r'wb')) - chunk.header.write(f) - chunk.write(f) - f.write('\n'.join(['# ' + i for i in phelp.splitlines()])) - f.close() - # Start the editor and wait for it to complete - editor = ui.geteditor() - ret = ui.system("%s \"%s\"" % (editor, patchfn), - environ={'HGUSER': ui.username()}, - blockedtag='filterpatch') - if ret != 0: - ui.warn(_("editor exited with exit code %d\n") % ret) - continue - # Remove comment lines - patchfp = open(patchfn, r'rb') - ncpatchfp = stringio() - for line in util.iterfile(patchfp): - line = util.fromnativeeol(line) - if not line.startswith('#'): - ncpatchfp.write(line) - patchfp.close() - ncpatchfp.seek(0) - newpatches = parsepatch(ncpatchfp) - finally: - os.unlink(patchfn) - del ncpatchfp - # Signal that the chunk shouldn't be applied as-is, but - # provide the new patch to be used instead. 
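# Illustrative sketch, not part of this changeset: the edit round-trip
# used above -- dump the hunk to a temp file, let an editor modify it,
# then drop the '#' help lines before re-parsing. Names here are
# hypothetical; the real code goes through ui.system()/ui.geteditor().
import os, subprocess, tempfile

def edithunk(patchtext, editor='vi'):
    fd, fn = tempfile.mkstemp(prefix='hg-editor-', suffix='.diff')
    try:
        with os.fdopen(fd, 'wb') as f:
            f.write(patchtext)
        subprocess.check_call([editor, fn])
        with open(fn, 'rb') as f:
            # comment lines are stripped, as in the loop above
            return b''.join(l for l in f if not l.startswith(b'#'))
    finally:
        os.unlink(fn)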
- ret = False - elif r == 3: # Skip - ret = skipfile = False - elif r == 4: # file (Record remaining) - ret = skipfile = True - elif r == 5: # done, skip remaining - ret = skipall = False - elif r == 6: # all - ret = skipall = True - elif r == 7: # quit - raise error.Abort(_('user quit')) - return ret, skipfile, skipall, newpatches - - seen = set() - applied = {} # 'filename' -> [] of chunks - skipfile, skipall = None, None - pos, total = 1, sum(len(h.hunks) for h in headers) - for h in headers: - pos += len(h.hunks) - skipfile = None - fixoffset = 0 - hdr = ''.join(h.header) - if hdr in seen: - continue - seen.add(hdr) - if skipall is None: - h.pretty(ui) - msg = (_('examine changes to %s?') % - _(' and ').join("'%s'" % f for f in h.files())) - r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None) - if not r: - continue - applied[h.filename()] = [h] - if h.allhunks(): - applied[h.filename()] += h.hunks - continue - for i, chunk in enumerate(h.hunks): - if skipfile is None and skipall is None: - chunk.pretty(ui) - if total == 1: - msg = messages['single'][operation] % chunk.filename() - else: - idx = pos - len(h.hunks) + i - msg = messages['multiple'][operation] % (idx, total, - chunk.filename()) - r, skipfile, skipall, newpatches = prompt(skipfile, - skipall, msg, chunk) - if r: - if fixoffset: - chunk = copy.copy(chunk) - chunk.toline += fixoffset - applied[chunk.filename()].append(chunk) - elif newpatches is not None: - for newpatch in newpatches: - for newhunk in newpatch.hunks: - if fixoffset: - newhunk.toline += fixoffset - applied[newhunk.filename()].append(newhunk) - else: - fixoffset += chunk.removed - chunk.added - return (sum([h for h in applied.itervalues() - if h[0].special() or len(h) > 1], []), {}) -class hunk(object): - def __init__(self, desc, num, lr, context): - self.number = num - self.desc = desc - self.hunk = [desc] - self.a = [] - self.b = [] - self.starta = self.lena = None - self.startb = self.lenb = None - if lr is not None: - if context: - self.read_context_hunk(lr) - else: - self.read_unified_hunk(lr) - - def getnormalized(self): - """Return a copy with line endings normalized to LF.""" - - def normalize(lines): - nlines = [] - for line in lines: - if line.endswith('\r\n'): - line = line[:-2] + '\n' - nlines.append(line) - return nlines - - # Dummy object, it is rebuilt manually - nh = hunk(self.desc, self.number, None, None) - nh.number = self.number - nh.desc = self.desc - nh.hunk = self.hunk - nh.a = normalize(self.a) - nh.b = normalize(self.b) - nh.starta = self.starta - nh.startb = self.startb - nh.lena = self.lena - nh.lenb = self.lenb - return nh - - def read_unified_hunk(self, lr): - m = unidesc.match(self.desc) - if not m: - raise PatchError(_("bad hunk #%d") % self.number) - self.starta, self.lena, self.startb, self.lenb = m.groups() - if self.lena is None: - self.lena = 1 - else: - self.lena = int(self.lena) - if self.lenb is None: - self.lenb = 1 - else: - self.lenb = int(self.lenb) - self.starta = int(self.starta) - self.startb = int(self.startb) - try: - diffhelper.addlines(lr, self.hunk, self.lena, self.lenb, - self.a, self.b) - except error.ParseError as e: - raise PatchError(_("bad hunk #%d: %s") % (self.number, e)) - # if we hit eof before finishing out the hunk, the last line will - # be zero length. Lets try to fix it up. 
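# Illustrative sketch, not part of this changeset: the '@@' header
# format that read_unified_hunk() above parses via the unidesc pattern;
# an omitted length defaults to 1.
import re
unified = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
m = unified.match(b'@@ -42,7 +42,8 @@')
starta, lena, startb, lenb = m.groups()   # b'42', b'7', b'42', b'8'
m = unified.match(b'@@ -1 +1 @@')
assert m.group(2) is None                 # length omitted -> treated as 1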
- while len(self.hunk[-1]) == 0: - del self.hunk[-1] - del self.a[-1] - del self.b[-1] - self.lena -= 1 - self.lenb -= 1 - self._fixnewline(lr) - - def read_context_hunk(self, lr): - self.desc = lr.readline() - m = contextdesc.match(self.desc) - if not m: - raise PatchError(_("bad hunk #%d") % self.number) - self.starta, aend = m.groups() - self.starta = int(self.starta) - if aend is None: - aend = self.starta - self.lena = int(aend) - self.starta - if self.starta: - self.lena += 1 - for x in xrange(self.lena): - l = lr.readline() - if l.startswith('---'): - # lines addition, old block is empty - lr.push(l) - break - s = l[2:] - if l.startswith('- ') or l.startswith('! '): - u = '-' + s - elif l.startswith(' '): - u = ' ' + s - else: - raise PatchError(_("bad hunk #%d old text line %d") % - (self.number, x)) - self.a.append(u) - self.hunk.append(u) - - l = lr.readline() - if l.startswith('\ '): - s = self.a[-1][:-1] - self.a[-1] = s - self.hunk[-1] = s - l = lr.readline() - m = contextdesc.match(l) - if not m: - raise PatchError(_("bad hunk #%d") % self.number) - self.startb, bend = m.groups() - self.startb = int(self.startb) - if bend is None: - bend = self.startb - self.lenb = int(bend) - self.startb - if self.startb: - self.lenb += 1 - hunki = 1 - for x in xrange(self.lenb): - l = lr.readline() - if l.startswith('\ '): - # XXX: the only way to hit this is with an invalid line range. - # The no-eol marker is not counted in the line range, but I - # guess there are diff(1) out there which behave differently. - s = self.b[-1][:-1] - self.b[-1] = s - self.hunk[hunki - 1] = s - continue - if not l: - # line deletions, new block is empty and we hit EOF - lr.push(l) - break - s = l[2:] - if l.startswith('+ ') or l.startswith('! '): - u = '+' + s - elif l.startswith(' '): - u = ' ' + s - elif len(self.b) == 0: - # line deletions, new block is empty - lr.push(l) - break - else: - raise PatchError(_("bad hunk #%d old text line %d") % - (self.number, x)) - self.b.append(s) - while True: - if hunki >= len(self.hunk): - h = "" - else: - h = self.hunk[hunki] - hunki += 1 - if h == u: - break - elif h.startswith('-'): - continue - else: - self.hunk.insert(hunki - 1, u) - break - - if not self.a: - # this happens when lines were only added to the hunk - for x in self.hunk: - if x.startswith('-') or x.startswith(' '): - self.a.append(x) - if not self.b: - # this happens when lines were only deleted from the hunk - for x in self.hunk: - if x.startswith('+') or x.startswith(' '): - self.b.append(x[1:]) - # @@ -start,len +start,len @@ - self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena, - self.startb, self.lenb) - self.hunk[0] = self.desc - self._fixnewline(lr) - - def _fixnewline(self, lr): - l = lr.readline() - if l.startswith('\ '): - diffhelper.fixnewline(self.hunk, self.a, self.b) - else: - lr.push(l) - - def complete(self): - return len(self.a) == self.lena and len(self.b) == self.lenb - - def _fuzzit(self, old, new, fuzz, toponly): - # this removes context lines from the top and bottom of list 'l'. It - # checks the hunk to make sure only context lines are removed, and then - # returns a new shortened list of lines. 
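# Illustrative sketch, not part of this changeset: what "fuzz" means
# here -- up to 'fuzz' leading (and, unless toponly, trailing) context
# lines are shaved off a hunk so it can still match after the
# surrounding context has drifted. Assumes lines keep their leading
# ' '/'-'/'+' markers; 'trimfuzz' is a hypothetical name.
def trimfuzz(old, new, fuzz, toponly=False):
    top = 0
    while top < min(fuzz, len(old)) and old[top].startswith(' '):
        top += 1
    bot = 0
    if not toponly:
        while (bot < min(fuzz, len(old) - top)
               and old[len(old) - 1 - bot].startswith(' ')):
            bot += 1
    return old[top:len(old) - bot], new[top:len(new) - bot], top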
- fuzz = min(fuzz, len(old)) - if fuzz: - top = 0 - bot = 0 - hlen = len(self.hunk) - for x in xrange(hlen - 1): - # the hunk starts with the @@ line, so use x+1 - if self.hunk[x + 1].startswith(' '): - top += 1 - else: - break - if not toponly: - for x in xrange(hlen - 1): - if self.hunk[hlen - bot - 1].startswith(' '): - bot += 1 - else: - break - - bot = min(fuzz, bot) - top = min(fuzz, top) - return old[top:len(old) - bot], new[top:len(new) - bot], top - return old, new, 0 - - def fuzzit(self, fuzz, toponly): - old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly) - oldstart = self.starta + top - newstart = self.startb + top - # zero length hunk ranges already have their start decremented - if self.lena and oldstart > 0: - oldstart -= 1 - if self.lenb and newstart > 0: - newstart -= 1 - return old, oldstart, new, newstart - -class binhunk(object): - 'A binary patch file.' - def __init__(self, lr, fname): - self.text = None - self.delta = False - self.hunk = ['GIT binary patch\n'] - self._fname = fname - self._read(lr) - - def complete(self): - return self.text is not None - - def new(self, lines): - if self.delta: - return [applybindelta(self.text, ''.join(lines))] - return [self.text] - - def _read(self, lr): - def getline(lr, hunk): - l = lr.readline() - hunk.append(l) - return l.rstrip('\r\n') - - size = 0 - while True: - line = getline(lr, self.hunk) - if not line: - raise PatchError(_('could not extract "%s" binary data') - % self._fname) - if line.startswith('literal '): - size = int(line[8:].rstrip()) - break - if line.startswith('delta '): - size = int(line[6:].rstrip()) - self.delta = True - break - dec = [] - line = getline(lr, self.hunk) - while len(line) > 1: - l = line[0:1] - if l <= 'Z' and l >= 'A': - l = ord(l) - ord('A') + 1 - else: - l = ord(l) - ord('a') + 27 - try: - dec.append(util.b85decode(line[1:])[:l]) - except ValueError as e: - raise PatchError(_('could not decode "%s" binary patch: %s') - % (self._fname, stringutil.forcebytestr(e))) - line = getline(lr, self.hunk) - text = zlib.decompress(''.join(dec)) - if len(text) != size: - raise PatchError(_('"%s" length is %d bytes, should be %d') - % (self._fname, len(text), size)) - self.text = text - -def parsefilename(str): - # --- filename \t|space stuff - s = str[4:].rstrip('\r\n') - i = s.find('\t') - if i < 0: - i = s.find(' ') - if i < 0: - return s - return s[:i] - -def reversehunks(hunks): - '''reverse the signs in the hunks given as argument - - This function operates on hunks coming out of patch.filterpatch, that is - a list of the form: [header1, hunk1, hunk2, header2...]. Example usage: - - >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g - ... --- a/folder1/g - ... +++ b/folder1/g - ... @@ -1,7 +1,7 @@ - ... +firstline - ... c - ... 1 - ... 2 - ... + 3 - ... -4 - ... 5 - ... d - ... +lastline""" - >>> hunks = parsepatch([rawpatch]) - >>> hunkscomingfromfilterpatch = [] - >>> for h in hunks: - ... hunkscomingfromfilterpatch.append(h) - ... hunkscomingfromfilterpatch.extend(h.hunks) - - >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch) - >>> from . import util - >>> fp = util.stringio() - >>> for c in reversedhunks: - ... 
c.write(fp) - >>> fp.seek(0) or None - >>> reversedpatch = fp.read() - >>> print(pycompat.sysstr(reversedpatch)) - diff --git a/folder1/g b/folder1/g - --- a/folder1/g - +++ b/folder1/g - @@ -1,4 +1,3 @@ - -firstline - c - 1 - 2 - @@ -2,6 +1,6 @@ - c - 1 - 2 - - 3 - +4 - 5 - d - @@ -6,3 +5,2 @@ - 5 - d - -lastline - - ''' - - newhunks = [] - for c in hunks: - if util.safehasattr(c, 'reversehunk'): - c = c.reversehunk() - newhunks.append(c) - return newhunks - -def parsepatch(originalchunks, maxcontext=None): - """patch -> [] of headers -> [] of hunks - - If maxcontext is not None, trim context lines if necessary. - - >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g - ... --- a/folder1/g - ... +++ b/folder1/g - ... @@ -1,8 +1,10 @@ - ... 1 - ... 2 - ... -3 - ... 4 - ... 5 - ... 6 - ... +6.1 - ... +6.2 - ... 7 - ... 8 - ... +9''' - >>> out = util.stringio() - >>> headers = parsepatch([rawpatch], maxcontext=1) - >>> for header in headers: - ... header.write(out) - ... for hunk in header.hunks: - ... hunk.write(out) - >>> print(pycompat.sysstr(out.getvalue())) - diff --git a/folder1/g b/folder1/g - --- a/folder1/g - +++ b/folder1/g - @@ -2,3 +2,2 @@ - 2 - -3 - 4 - @@ -6,2 +5,4 @@ - 6 - +6.1 - +6.2 - 7 - @@ -8,1 +9,2 @@ - 8 - +9 - """ - class parser(object): - """patch parsing state machine""" - def __init__(self): - self.fromline = 0 - self.toline = 0 - self.proc = '' - self.header = None - self.context = [] - self.before = [] - self.hunk = [] - self.headers = [] - - def addrange(self, limits): - fromstart, fromend, tostart, toend, proc = limits - self.fromline = int(fromstart) - self.toline = int(tostart) - self.proc = proc - - def addcontext(self, context): - if self.hunk: - h = recordhunk(self.header, self.fromline, self.toline, - self.proc, self.before, self.hunk, context, maxcontext) - self.header.hunks.append(h) - self.fromline += len(self.before) + h.removed - self.toline += len(self.before) + h.added - self.before = [] - self.hunk = [] - self.context = context - - def addhunk(self, hunk): - if self.context: - self.before = self.context - self.context = [] - self.hunk = hunk - - def newfile(self, hdr): - self.addcontext([]) - h = header(hdr) - self.headers.append(h) - self.header = h - - def addother(self, line): - pass # 'other' lines are ignored - - def finished(self): - self.addcontext([]) - return self.headers - - transitions = { - 'file': {'context': addcontext, - 'file': newfile, - 'hunk': addhunk, - 'range': addrange}, - 'context': {'file': newfile, - 'hunk': addhunk, - 'range': addrange, - 'other': addother}, - 'hunk': {'context': addcontext, - 'file': newfile, - 'range': addrange}, - 'range': {'context': addcontext, - 'hunk': addhunk}, - 'other': {'other': addother}, - } - - p = parser() - fp = stringio() - fp.write(''.join(originalchunks)) - fp.seek(0) - - state = 'context' - for newstate, data in scanpatch(fp): - try: - p.transitions[state][newstate](p, data) - except KeyError: - raise PatchError('unhandled transition: %s -> %s' % - (state, newstate)) - state = newstate - del fp - return p.finished() - -def pathtransform(path, strip, prefix): - '''turn a path from a patch into a path suitable for the repository - - prefix, if not empty, is expected to be normalized with a / at the end. - - Returns (stripped components, path in repository). 
- - >>> pathtransform(b'a/b/c', 0, b'') - ('', 'a/b/c') - >>> pathtransform(b' a/b/c ', 0, b'') - ('', ' a/b/c') - >>> pathtransform(b' a/b/c ', 2, b'') - ('a/b/', 'c') - >>> pathtransform(b'a/b/c', 0, b'd/e/') - ('', 'd/e/a/b/c') - >>> pathtransform(b' a//b/c ', 2, b'd/e/') - ('a//b/', 'd/e/c') - >>> pathtransform(b'a/b/c', 3, b'') - Traceback (most recent call last): - PatchError: unable to strip away 1 of 3 dirs from a/b/c - ''' - pathlen = len(path) - i = 0 - if strip == 0: - return '', prefix + path.rstrip() - count = strip - while count > 0: - i = path.find('/', i) - if i == -1: - raise PatchError(_("unable to strip away %d of %d dirs from %s") % - (count, strip, path)) - i += 1 - # consume '//' in the path - while i < pathlen - 1 and path[i:i + 1] == '/': - i += 1 - count -= 1 - return path[:i].lstrip(), prefix + path[i:].rstrip() - -def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix): - nulla = afile_orig == "/dev/null" - nullb = bfile_orig == "/dev/null" - create = nulla and hunk.starta == 0 and hunk.lena == 0 - remove = nullb and hunk.startb == 0 and hunk.lenb == 0 - abase, afile = pathtransform(afile_orig, strip, prefix) - gooda = not nulla and backend.exists(afile) - bbase, bfile = pathtransform(bfile_orig, strip, prefix) - if afile == bfile: - goodb = gooda - else: - goodb = not nullb and backend.exists(bfile) - missing = not goodb and not gooda and not create - - # some diff programs apparently produce patches where the afile is - # not /dev/null, but afile starts with bfile - abasedir = afile[:afile.rfind('/') + 1] - bbasedir = bfile[:bfile.rfind('/') + 1] - if (missing and abasedir == bbasedir and afile.startswith(bfile) - and hunk.starta == 0 and hunk.lena == 0): - create = True - missing = False - - # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the - # diff is between a file and its backup. In this case, the original - # file should be patched (see original mpatch code). 
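# Illustrative sketch, not part of this changeset: the strip/prefix
# semantics shown in the pathtransform() doctests above, i.e. patch(1)
# -p style component stripping plus an optional repo-relative prefix.
# The real function also strips whitespace and collapses '//'; this
# simplified 'strippath' is a hypothetical name.
def strippath(path, strip, prefix=''):
    parts = path.split('/')
    if strip > len(parts) - 1:
        raise ValueError('cannot strip %d components' % strip)
    return prefix + '/'.join(parts[strip:])

assert strippath('a/b/c', 1) == 'b/c'            # like patch -p1
assert strippath('a/b/c', 0, 'd/e/') == 'd/e/a/b/c'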
- isbackup = (abase == bbase and bfile.startswith(afile)) - fname = None - if not missing: - if gooda and goodb: - if isbackup: - fname = afile - else: - fname = bfile - elif gooda: - fname = afile - - if not fname: - if not nullb: - if isbackup: - fname = afile - else: - fname = bfile - elif not nulla: - fname = afile - else: - raise PatchError(_("undefined source and destination files")) - - gp = patchmeta(fname) - if create: - gp.op = 'ADD' - elif remove: - gp.op = 'DELETE' - return gp - -def scanpatch(fp): - """like patch.iterhunks, but yield different events - - - ('file', [header_lines + fromfile + tofile]) - - ('context', [context_lines]) - - ('hunk', [hunk_lines]) - - ('range', (-start,len, +start,len, proc)) - """ - lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)') - lr = linereader(fp) - - def scanwhile(first, p): - """scan lr while predicate holds""" - lines = [first] - for line in iter(lr.readline, ''): - if p(line): - lines.append(line) - else: - lr.push(line) - break - return lines - - for line in iter(lr.readline, ''): - if line.startswith('diff --git a/') or line.startswith('diff -r '): - def notheader(line): - s = line.split(None, 1) - return not s or s[0] not in ('---', 'diff') - header = scanwhile(line, notheader) - fromfile = lr.readline() - if fromfile.startswith('---'): - tofile = lr.readline() - header += [fromfile, tofile] - else: - lr.push(fromfile) - yield 'file', header - elif line.startswith(' '): - cs = (' ', '\\') - yield 'context', scanwhile(line, lambda l: l.startswith(cs)) - elif line.startswith(('-', '+')): - cs = ('-', '+', '\\') - yield 'hunk', scanwhile(line, lambda l: l.startswith(cs)) - else: - m = lines_re.match(line) - if m: - yield 'range', m.groups() - else: - yield 'other', line - -def scangitpatch(lr, firstline): - """ - Git patches can emit: - - rename a to b - - change b - - copy a to c - - change c - - We cannot apply this sequence as-is, the renamed 'a' could not be - found for it would have been renamed already. And we cannot copy - from 'b' instead because 'b' would have been changed already. So - we scan the git patch for copy and rename commands so we can - perform the copies ahead of time. - """ - pos = 0 - try: - pos = lr.fp.tell() - fp = lr.fp - except IOError: - fp = stringio(lr.fp.read()) - gitlr = linereader(fp) - gitlr.push(firstline) - gitpatches = readgitpatch(gitlr) - fp.seek(pos) - return gitpatches - -def iterhunks(fp): - """Read a patch and yield the following events: - - ("file", afile, bfile, firsthunk): select a new target file. - - ("hunk", hunk): a new hunk is ready to be applied, follows a - "file" event. - - ("git", gitchanges): current diff is in git format, gitchanges - maps filenames to gitpatch records. Unique event. 
- """ - afile = "" - bfile = "" - state = None - hunknum = 0 - emitfile = newfile = False - gitpatches = None - - # our states - BFILE = 1 - context = None - lr = linereader(fp) - - for x in iter(lr.readline, ''): - if state == BFILE and ( - (not context and x.startswith('@')) - or (context is not False and x.startswith('***************')) - or x.startswith('GIT binary patch')): - gp = None - if (gitpatches and - gitpatches[-1].ispatching(afile, bfile)): - gp = gitpatches.pop() - if x.startswith('GIT binary patch'): - h = binhunk(lr, gp.path) - else: - if context is None and x.startswith('***************'): - context = True - h = hunk(x, hunknum + 1, lr, context) - hunknum += 1 - if emitfile: - emitfile = False - yield 'file', (afile, bfile, h, gp and gp.copy() or None) - yield 'hunk', h - elif x.startswith('diff --git a/'): - m = gitre.match(x.rstrip(' \r\n')) - if not m: - continue - if gitpatches is None: - # scan whole input for git metadata - gitpatches = scangitpatch(lr, x) - yield 'git', [g.copy() for g in gitpatches - if g.op in ('COPY', 'RENAME')] - gitpatches.reverse() - afile = 'a/' + m.group(1) - bfile = 'b/' + m.group(2) - while gitpatches and not gitpatches[-1].ispatching(afile, bfile): - gp = gitpatches.pop() - yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy()) - if not gitpatches: - raise PatchError(_('failed to synchronize metadata for "%s"') - % afile[2:]) - gp = gitpatches[-1] - newfile = True - elif x.startswith('---'): - # check for a unified diff - l2 = lr.readline() - if not l2.startswith('+++'): - lr.push(l2) - continue - newfile = True - context = False - afile = parsefilename(x) - bfile = parsefilename(l2) - elif x.startswith('***'): - # check for a context diff - l2 = lr.readline() - if not l2.startswith('---'): - lr.push(l2) - continue - l3 = lr.readline() - lr.push(l3) - if not l3.startswith("***************"): - lr.push(l2) - continue - newfile = True - context = True - afile = parsefilename(x) - bfile = parsefilename(l2) - - if newfile: - newfile = False - emitfile = True - state = BFILE - hunknum = 0 - - while gitpatches: - gp = gitpatches.pop() - yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy()) - -def applybindelta(binchunk, data): - """Apply a binary delta hunk - The algorithm used is the algorithm from git's patch-delta.c - """ - def deltahead(binchunk): - i = 0 - for c in binchunk: - i += 1 - if not (ord(c) & 0x80): - return i - return i - out = "" - s = deltahead(binchunk) - binchunk = binchunk[s:] - s = deltahead(binchunk) - binchunk = binchunk[s:] - i = 0 - while i < len(binchunk): - cmd = ord(binchunk[i]) - i += 1 - if (cmd & 0x80): - offset = 0 - size = 0 - if (cmd & 0x01): - offset = ord(binchunk[i]) - i += 1 - if (cmd & 0x02): - offset |= ord(binchunk[i]) << 8 - i += 1 - if (cmd & 0x04): - offset |= ord(binchunk[i]) << 16 - i += 1 - if (cmd & 0x08): - offset |= ord(binchunk[i]) << 24 - i += 1 - if (cmd & 0x10): - size = ord(binchunk[i]) - i += 1 - if (cmd & 0x20): - size |= ord(binchunk[i]) << 8 - i += 1 - if (cmd & 0x40): - size |= ord(binchunk[i]) << 16 - i += 1 - if size == 0: - size = 0x10000 - offset_end = offset + size - out += data[offset:offset_end] - elif cmd != 0: - offset_end = i + cmd - out += binchunk[i:offset_end] - i += cmd - else: - raise PatchError(_('unexpected delta opcode 0')) - return out - -def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'): - """Reads a patch from fp and tries to apply it. 
- - Returns 0 for a clean patch, -1 if any rejects were found and 1 if - there was any fuzz. - - If 'eolmode' is 'strict', the patch content and patched file are - read in binary mode. Otherwise, line endings are ignored when - patching then normalized according to 'eolmode'. - """ - return _applydiff(ui, fp, patchfile, backend, store, strip=strip, - prefix=prefix, eolmode=eolmode) - -def _canonprefix(repo, prefix): - if prefix: - prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix) - if prefix != '': - prefix += '/' - return prefix - -def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='', - eolmode='strict'): - prefix = _canonprefix(backend.repo, prefix) - def pstrip(p): - return pathtransform(p, strip - 1, prefix)[1] - - rejects = 0 - err = 0 - current_file = None - - for state, values in iterhunks(fp): - if state == 'hunk': - if not current_file: - continue - ret = current_file.apply(values) - if ret > 0: - err = 1 - elif state == 'file': - if current_file: - rejects += current_file.close() - current_file = None - afile, bfile, first_hunk, gp = values - if gp: - gp.path = pstrip(gp.path) - if gp.oldpath: - gp.oldpath = pstrip(gp.oldpath) - else: - gp = makepatchmeta(backend, afile, bfile, first_hunk, strip, - prefix) - if gp.op == 'RENAME': - backend.unlink(gp.oldpath) - if not first_hunk: - if gp.op == 'DELETE': - backend.unlink(gp.path) - continue - data, mode = None, None - if gp.op in ('RENAME', 'COPY'): - data, mode = store.getfile(gp.oldpath)[:2] - if data is None: - # This means that the old path does not exist - raise PatchError(_("source file '%s' does not exist") - % gp.oldpath) - if gp.mode: - mode = gp.mode - if gp.op == 'ADD': - # Added files without content have no hunk and - # must be created - data = '' - if data or mode: - if (gp.op in ('ADD', 'RENAME', 'COPY') - and backend.exists(gp.path)): - raise PatchError(_("cannot create %s: destination " - "already exists") % gp.path) - backend.setfile(gp.path, data, mode, gp.oldpath) - continue - try: - current_file = patcher(ui, gp, backend, store, - eolmode=eolmode) - except PatchError as inst: - ui.warn(str(inst) + '\n') - current_file = None - rejects += 1 - continue - elif state == 'git': - for gp in values: - path = pstrip(gp.oldpath) - data, mode = backend.getfile(path) - if data is None: - # The error ignored here will trigger a getfile() - # error in a place more appropriate for error - # handling, and will not interrupt the patching - # process. - pass - else: - store.setfile(path, data, mode) - else: - raise error.Abort(_('unsupported parser state: %s') % state) - - if current_file: - rejects += current_file.close() - - if rejects: - return -1 - return err - -def _externalpatch(ui, repo, patcher, patchname, strip, files, - similarity): - """use to apply to the working directory. 
- returns whether patch was applied with fuzz factor.""" - - fuzz = False - args = [] - cwd = repo.root - if cwd: - args.append('-d %s' % procutil.shellquote(cwd)) - cmd = ('%s %s -p%d < %s' - % (patcher, ' '.join(args), strip, procutil.shellquote(patchname))) - fp = procutil.popen(cmd, 'rb') - try: - for line in util.iterfile(fp): - line = line.rstrip() - ui.note(line + '\n') - if line.startswith('patching file '): - pf = util.parsepatchoutput(line) - printed_file = False - files.add(pf) - elif line.find('with fuzz') >= 0: - fuzz = True - if not printed_file: - ui.warn(pf + '\n') - printed_file = True - ui.warn(line + '\n') - elif line.find('saving rejects to file') >= 0: - ui.warn(line + '\n') - elif line.find('FAILED') >= 0: - if not printed_file: - ui.warn(pf + '\n') - printed_file = True - ui.warn(line + '\n') - finally: - if files: - scmutil.marktouched(repo, files, similarity) - code = fp.close() - if code: - raise PatchError(_("patch command failed: %s") % - procutil.explainexit(code)) - return fuzz - -def patchbackend(ui, backend, patchobj, strip, prefix, files=None, - eolmode='strict'): - if files is None: - files = set() - if eolmode is None: - eolmode = ui.config('patch', 'eol') - if eolmode.lower() not in eolmodes: - raise error.Abort(_('unsupported line endings type: %s') % eolmode) - eolmode = eolmode.lower() - - store = filestore() - try: - fp = open(patchobj, 'rb') - except TypeError: - fp = patchobj - try: - ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix, - eolmode=eolmode) - finally: - if fp != patchobj: - fp.close() - files.update(backend.close()) - store.close() - if ret < 0: - raise PatchError(_('patch failed to apply')) - return ret > 0 - -def internalpatch(ui, repo, patchobj, strip, prefix='', files=None, - eolmode='strict', similarity=0): - """use builtin patch to apply to the working directory. - returns whether patch was applied with fuzz factor.""" - backend = workingbackend(ui, repo, similarity) - return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode) - -def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None, - eolmode='strict'): - backend = repobackend(ui, repo, ctx, store) - return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode) - -def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict', - similarity=0): - """Apply to the working directory. - - 'eolmode' specifies how end of lines should be handled. It can be: - - 'strict': inputs are read in binary mode, EOLs are preserved - - 'crlf': EOLs are ignored when patching and reset to CRLF - - 'lf': EOLs are ignored when patching and reset to LF - - None: get it from user settings, default to 'strict' - 'eolmode' is ignored when using an external patcher program. - - Returns whether patch was applied with fuzz factor. 
- """ - patcher = ui.config('ui', 'patch') - if files is None: - files = set() - if patcher: - return _externalpatch(ui, repo, patcher, patchname, strip, - files, similarity) - return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode, - similarity) - -def changedfiles(ui, repo, patchpath, strip=1, prefix=''): - backend = fsbackend(ui, repo.root) - prefix = _canonprefix(repo, prefix) - with open(patchpath, 'rb') as fp: - changed = set() - for state, values in iterhunks(fp): - if state == 'file': - afile, bfile, first_hunk, gp = values - if gp: - gp.path = pathtransform(gp.path, strip - 1, prefix)[1] - if gp.oldpath: - gp.oldpath = pathtransform(gp.oldpath, strip - 1, - prefix)[1] - else: - gp = makepatchmeta(backend, afile, bfile, first_hunk, strip, - prefix) - changed.add(gp.path) - if gp.op == 'RENAME': - changed.add(gp.oldpath) - elif state not in ('hunk', 'git'): - raise error.Abort(_('unsupported parser state: %s') % state) - return changed - -class GitDiffRequired(Exception): - pass - def diffallopts(ui, opts=None, untrusted=False, section='diff'): '''return diffopts with all features supported and parsed''' return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section, git=True, whitespace=True, formatchanging=True) -diffopts = diffallopts - def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False, whitespace=False, formatchanging=False): '''return diffopts with only opted-in features parsed @@ -2270,7 +53,6 @@ def difffeatureopts(ui, opts=None, untru 'showfunc': get('show_function', 'showfunc'), 'context': get('unified', getter=ui.config), } - buildopts['worddiff'] = ui.configbool('experimental', 'worddiff') buildopts['xdiff'] = ui.configbool('experimental', 'xdiff') if git: @@ -2318,633 +100,6 @@ def difffeatureopts(ui, opts=None, untru buildopts['nobinary'] = (not binary if binary is not None else get('nobinary', forceplain=False)) buildopts['noprefix'] = get('noprefix', forceplain=False) + buildopts['worddiff'] = get('word_diff', 'word-diff', forceplain=False) return mdiff.diffopts(**pycompat.strkwargs(buildopts)) - -def diff(repo, node1=None, node2=None, match=None, changes=None, - opts=None, losedatafn=None, prefix='', relroot='', copy=None, - hunksfilterfn=None): - '''yields diff of changes to files between two nodes, or node and - working directory. - - if node1 is None, use first dirstate parent instead. - if node2 is None, compare node1 with working directory. - - losedatafn(**kwarg) is a callable run when opts.upgrade=True and - every time some change cannot be represented with the current - patch format. Return False to upgrade to git patch format, True to - accept the loss or raise an exception to abort the diff. It is - called with the name of current file being diffed as 'fn'. If set - to None, patches will always be upgraded to git format when - necessary. - - prefix is a filename prefix that is prepended to all filenames on - display (used for subrepos). - - relroot, if not empty, must be normalized with a trailing /. Any match - patterns that fall outside it will be ignored. - - copy, if not empty, should contain mappings {dst@y: src@x} of copy - information. - - hunksfilterfn, if not None, should be a function taking a filectx and - hunks generator that may yield filtered hunks. 
- ''' - for fctx1, fctx2, hdr, hunks in diffhunks( - repo, node1=node1, node2=node2, - match=match, changes=changes, opts=opts, - losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy, - ): - if hunksfilterfn is not None: - # If the file has been removed, fctx2 is None; but this should - # not occur here since we catch removed files early in - # logcmdutil.getlinerangerevs() for 'hg log -L'. - assert fctx2 is not None, \ - 'fctx2 unexpectly None in diff hunks filtering' - hunks = hunksfilterfn(fctx2, hunks) - text = ''.join(sum((list(hlines) for hrange, hlines in hunks), [])) - if hdr and (text or len(hdr) > 1): - yield '\n'.join(hdr) + '\n' - if text: - yield text - -def diffhunks(repo, node1=None, node2=None, match=None, changes=None, - opts=None, losedatafn=None, prefix='', relroot='', copy=None): - """Yield diff of changes to files in the form of (`header`, `hunks`) tuples - where `header` is a list of diff headers and `hunks` is an iterable of - (`hunkrange`, `hunklines`) tuples. - - See diff() for the meaning of parameters. - """ - - if opts is None: - opts = mdiff.defaultopts - - if not node1 and not node2: - node1 = repo.dirstate.p1() - - def lrugetfilectx(): - cache = {} - order = collections.deque() - def getfilectx(f, ctx): - fctx = ctx.filectx(f, filelog=cache.get(f)) - if f not in cache: - if len(cache) > 20: - del cache[order.popleft()] - cache[f] = fctx.filelog() - else: - order.remove(f) - order.append(f) - return fctx - return getfilectx - getfilectx = lrugetfilectx() - - ctx1 = repo[node1] - ctx2 = repo[node2] - - relfiltered = False - if relroot != '' and match.always(): - # as a special case, create a new matcher with just the relroot - pats = [relroot] - match = scmutil.match(ctx2, pats, default='path') - relfiltered = True - - if not changes: - changes = repo.status(ctx1, ctx2, match=match) - modified, added, removed = changes[:3] - - if not modified and not added and not removed: - return [] - - if repo.ui.debugflag: - hexfunc = hex - else: - hexfunc = short - revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node] - - if copy is None: - copy = {} - if opts.git or opts.upgrade: - copy = copies.pathcopies(ctx1, ctx2, match=match) - - if relroot is not None: - if not relfiltered: - # XXX this would ideally be done in the matcher, but that is - # generally meant to 'or' patterns, not 'and' them. In this case we - # need to 'and' all the patterns from the matcher with relroot. - def filterrel(l): - return [f for f in l if f.startswith(relroot)] - modified = filterrel(modified) - added = filterrel(added) - removed = filterrel(removed) - relfiltered = True - # filter out copies where either side isn't inside the relative root - copy = dict(((dst, src) for (dst, src) in copy.iteritems() - if dst.startswith(relroot) - and src.startswith(relroot))) - - modifiedset = set(modified) - addedset = set(added) - removedset = set(removed) - for f in modified: - if f not in ctx1: - # Fix up added, since merged-in additions appear as - # modifications during merges - modifiedset.remove(f) - addedset.add(f) - for f in removed: - if f not in ctx1: - # Merged-in additions that are then removed are reported as removed. - # They are not in ctx1, so We don't want to show them in the diff. - removedset.remove(f) - modified = sorted(modifiedset) - added = sorted(addedset) - removed = sorted(removedset) - for dst, src in list(copy.items()): - if src not in ctx1: - # Files merged in during a merge and then copied/renamed are - # reported as copies. 
We want to show them in the diff as additions. - del copy[dst] - - prefetchmatch = scmutil.matchfiles( - repo, list(modifiedset | addedset | removedset)) - scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch) - - def difffn(opts, losedata): - return trydiff(repo, revs, ctx1, ctx2, modified, added, removed, - copy, getfilectx, opts, losedata, prefix, relroot) - if opts.upgrade and not opts.git: - try: - def losedata(fn): - if not losedatafn or not losedatafn(fn=fn): - raise GitDiffRequired - # Buffer the whole output until we are sure it can be generated - return list(difffn(opts.copy(git=False), losedata)) - except GitDiffRequired: - return difffn(opts.copy(git=True), None) - else: - return difffn(opts, None) - -def diffsinglehunk(hunklines): - """yield tokens for a list of lines in a single hunk""" - for line in hunklines: - # chomp - chompline = line.rstrip('\n') - # highlight tabs and trailing whitespace - stripline = chompline.rstrip() - if line[0] == '-': - label = 'diff.deleted' - elif line[0] == '+': - label = 'diff.inserted' - else: - raise error.ProgrammingError('unexpected hunk line: %s' % line) - for token in tabsplitter.findall(stripline): - if '\t' == token[0]: - yield (token, 'diff.tab') - else: - yield (token, label) - - if chompline != stripline: - yield (chompline[len(stripline):], 'diff.trailingwhitespace') - if chompline != line: - yield (line[len(chompline):], '') - -def diffsinglehunkinline(hunklines): - """yield tokens for a list of lines in a single hunk, with inline colors""" - # prepare deleted, and inserted content - a = '' - b = '' - for line in hunklines: - if line[0] == '-': - a += line[1:] - elif line[0] == '+': - b += line[1:] - else: - raise error.ProgrammingError('unexpected hunk line: %s' % line) - # fast path: if either side is empty, use diffsinglehunk - if not a or not b: - for t in diffsinglehunk(hunklines): - yield t - return - # re-split the content into words - al = wordsplitter.findall(a) - bl = wordsplitter.findall(b) - # re-arrange the words to lines since the diff algorithm is line-based - aln = [s if s == '\n' else s + '\n' for s in al] - bln = [s if s == '\n' else s + '\n' for s in bl] - an = ''.join(aln) - bn = ''.join(bln) - # run the diff algorithm, prepare atokens and btokens - atokens = [] - btokens = [] - blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln) - for (a1, a2, b1, b2), btype in blocks: - changed = btype == '!' 
- for token in mdiff.splitnewlines(''.join(al[a1:a2])): - atokens.append((changed, token)) - for token in mdiff.splitnewlines(''.join(bl[b1:b2])): - btokens.append((changed, token)) - - # yield deleted tokens, then inserted ones - for prefix, label, tokens in [('-', 'diff.deleted', atokens), - ('+', 'diff.inserted', btokens)]: - nextisnewline = True - for changed, token in tokens: - if nextisnewline: - yield (prefix, label) - nextisnewline = False - # special handling line end - isendofline = token.endswith('\n') - if isendofline: - chomp = token[:-1] # chomp - token = chomp.rstrip() # detect spaces at the end - endspaces = chomp[len(token):] - # scan tabs - for maybetab in tabsplitter.findall(token): - if '\t' == maybetab[0]: - currentlabel = 'diff.tab' - else: - if changed: - currentlabel = label + '.changed' - else: - currentlabel = label + '.unchanged' - yield (maybetab, currentlabel) - if isendofline: - if endspaces: - yield (endspaces, 'diff.trailingwhitespace') - yield ('\n', '') - nextisnewline = True - -def difflabel(func, *args, **kw): - '''yields 2-tuples of (output, label) based on the output of func()''' - if kw.get(r'opts') and kw[r'opts'].worddiff: - dodiffhunk = diffsinglehunkinline - else: - dodiffhunk = diffsinglehunk - headprefixes = [('diff', 'diff.diffline'), - ('copy', 'diff.extended'), - ('rename', 'diff.extended'), - ('old', 'diff.extended'), - ('new', 'diff.extended'), - ('deleted', 'diff.extended'), - ('index', 'diff.extended'), - ('similarity', 'diff.extended'), - ('---', 'diff.file_a'), - ('+++', 'diff.file_b')] - textprefixes = [('@', 'diff.hunk'), - # - and + are handled by diffsinglehunk - ] - head = False - - # buffers a hunk, i.e. adjacent "-", "+" lines without other changes. - hunkbuffer = [] - def consumehunkbuffer(): - if hunkbuffer: - for token in dodiffhunk(hunkbuffer): - yield token - hunkbuffer[:] = [] - - for chunk in func(*args, **kw): - lines = chunk.split('\n') - linecount = len(lines) - for i, line in enumerate(lines): - if head: - if line.startswith('@'): - head = False - else: - if line and not line.startswith((' ', '+', '-', '@', '\\')): - head = True - diffline = False - if not head and line and line.startswith(('+', '-')): - diffline = True - - prefixes = textprefixes - if head: - prefixes = headprefixes - if diffline: - # buffered - bufferedline = line - if i + 1 < linecount: - bufferedline += "\n" - hunkbuffer.append(bufferedline) - else: - # unbuffered - for token in consumehunkbuffer(): - yield token - stripline = line.rstrip() - for prefix, label in prefixes: - if stripline.startswith(prefix): - yield (stripline, label) - if line != stripline: - yield (line[len(stripline):], - 'diff.trailingwhitespace') - break - else: - yield (line, '') - if i + 1 < linecount: - yield ('\n', '') - for token in consumehunkbuffer(): - yield token - -def diffui(*args, **kw): - '''like diff(), but yields 2-tuples of (output, label) for ui.write()''' - return difflabel(diff, *args, **kw) - -def _filepairs(modified, added, removed, copy, opts): - '''generates tuples (f1, f2, copyop), where f1 is the name of the file - before and f2 is the the name after. For added files, f1 will be None, - and for removed files, f2 will be None. 
copyop may be set to None, 'copy' - or 'rename' (the latter two only if opts.git is set).''' - gone = set() - - copyto = dict([(v, k) for k, v in copy.items()]) - - addedset, removedset = set(added), set(removed) - - for f in sorted(modified + added + removed): - copyop = None - f1, f2 = f, f - if f in addedset: - f1 = None - if f in copy: - if opts.git: - f1 = copy[f] - if f1 in removedset and f1 not in gone: - copyop = 'rename' - gone.add(f1) - else: - copyop = 'copy' - elif f in removedset: - f2 = None - if opts.git: - # have we already reported a copy above? - if (f in copyto and copyto[f] in addedset - and copy[copyto[f]] == f): - continue - yield f1, f2, copyop - -def trydiff(repo, revs, ctx1, ctx2, modified, added, removed, - copy, getfilectx, opts, losedatafn, prefix, relroot): - '''given input data, generate a diff and yield it in blocks - - If generating a diff would lose data like flags or binary data and - losedatafn is not None, it will be called. - - relroot is removed and prefix is added to every path in the diff output. - - If relroot is not empty, this function expects every path in modified, - added, removed and copy to start with it.''' - - def gitindex(text): - if not text: - text = "" - l = len(text) - s = hashlib.sha1('blob %d\0' % l) - s.update(text) - return hex(s.digest()) - - if opts.noprefix: - aprefix = bprefix = '' - else: - aprefix = 'a/' - bprefix = 'b/' - - def diffline(f, revs): - revinfo = ' '.join(["-r %s" % rev for rev in revs]) - return 'diff %s %s' % (revinfo, f) - - def isempty(fctx): - return fctx is None or fctx.size() == 0 - - date1 = dateutil.datestr(ctx1.date()) - date2 = dateutil.datestr(ctx2.date()) - - gitmode = {'l': '120000', 'x': '100755', '': '100644'} - - if relroot != '' and (repo.ui.configbool('devel', 'all-warnings') - or repo.ui.configbool('devel', 'check-relroot')): - for f in modified + added + removed + list(copy) + list(copy.values()): - if f is not None and not f.startswith(relroot): - raise AssertionError( - "file %s doesn't start with relroot %s" % (f, relroot)) - - for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts): - content1 = None - content2 = None - fctx1 = None - fctx2 = None - flag1 = None - flag2 = None - if f1: - fctx1 = getfilectx(f1, ctx1) - if opts.git or losedatafn: - flag1 = ctx1.flags(f1) - if f2: - fctx2 = getfilectx(f2, ctx2) - if opts.git or losedatafn: - flag2 = ctx2.flags(f2) - # if binary is True, output "summary" or "base85", but not "text diff" - if opts.text: - binary = False - else: - binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None) - - if losedatafn and not opts.git: - if (binary or - # copy/rename - f2 in copy or - # empty file creation - (not f1 and isempty(fctx2)) or - # empty file deletion - (isempty(fctx1) and not f2) or - # create with flags - (not f1 and flag2) or - # change flags - (f1 and f2 and flag1 != flag2)): - losedatafn(f2 or f1) - - path1 = f1 or f2 - path2 = f2 or f1 - path1 = posixpath.join(prefix, path1[len(relroot):]) - path2 = posixpath.join(prefix, path2[len(relroot):]) - header = [] - if opts.git: - header.append('diff --git %s%s %s%s' % - (aprefix, path1, bprefix, path2)) - if not f1: # added - header.append('new file mode %s' % gitmode[flag2]) - elif not f2: # removed - header.append('deleted file mode %s' % gitmode[flag1]) - else: # modified/copied/renamed - mode1, mode2 = gitmode[flag1], gitmode[flag2] - if mode1 != mode2: - header.append('old mode %s' % mode1) - header.append('new mode %s' % mode2) - if copyop is not None: - if 
opts.showsimilarity: - sim = similar.score(ctx1[path1], ctx2[path2]) * 100 - header.append('similarity index %d%%' % sim) - header.append('%s from %s' % (copyop, path1)) - header.append('%s to %s' % (copyop, path2)) - elif revs and not repo.ui.quiet: - header.append(diffline(path1, revs)) - - # fctx.is | diffopts | what to | is fctx.data() - # binary() | text nobinary git index | output? | outputted? - # ------------------------------------|---------------------------- - # yes | no no no * | summary | no - # yes | no no yes * | base85 | yes - # yes | no yes no * | summary | no - # yes | no yes yes 0 | summary | no - # yes | no yes yes >0 | summary | semi [1] - # yes | yes * * * | text diff | yes - # no | * * * * | text diff | yes - # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked - if binary and (not opts.git or (opts.git and opts.nobinary and not - opts.index)): - # fast path: no binary content will be displayed, content1 and - # content2 are only used for equivalent test. cmp() could have a - # fast path. - if fctx1 is not None: - content1 = b'\0' - if fctx2 is not None: - if fctx1 is not None and not fctx1.cmp(fctx2): - content2 = b'\0' # not different - else: - content2 = b'\0\0' - else: - # normal path: load contents - if fctx1 is not None: - content1 = fctx1.data() - if fctx2 is not None: - content2 = fctx2.data() - - if binary and opts.git and not opts.nobinary: - text = mdiff.b85diff(content1, content2) - if text: - header.append('index %s..%s' % - (gitindex(content1), gitindex(content2))) - hunks = (None, [text]), - else: - if opts.git and opts.index > 0: - flag = flag1 - if flag is None: - flag = flag2 - header.append('index %s..%s %s' % - (gitindex(content1)[0:opts.index], - gitindex(content2)[0:opts.index], - gitmode[flag])) - - uheaders, hunks = mdiff.unidiff(content1, date1, - content2, date2, - path1, path2, - binary=binary, opts=opts) - header.extend(uheaders) - yield fctx1, fctx2, header, hunks - -def diffstatsum(stats): - maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False - for f, a, r, b in stats: - maxfile = max(maxfile, encoding.colwidth(f)) - maxtotal = max(maxtotal, a + r) - addtotal += a - removetotal += r - binary = binary or b - - return maxfile, maxtotal, addtotal, removetotal, binary - -def diffstatdata(lines): - diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$') - - results = [] - filename, adds, removes, isbinary = None, 0, 0, False - - def addresult(): - if filename: - results.append((filename, adds, removes, isbinary)) - - # inheader is used to track if a line is in the - # header portion of the diff. This helps properly account - # for lines that start with '--' or '++' - inheader = False - - for line in lines: - if line.startswith('diff'): - addresult() - # starting a new file diff - # set numbers to 0 and reset inheader - inheader = True - adds, removes, isbinary = 0, 0, False - if line.startswith('diff --git a/'): - filename = gitre.search(line).group(2) - elif line.startswith('diff -r'): - # format: "diff -r ... -r ... 
filename" - filename = diffre.search(line).group(1) - elif line.startswith('@@'): - inheader = False - elif line.startswith('+') and not inheader: - adds += 1 - elif line.startswith('-') and not inheader: - removes += 1 - elif (line.startswith('GIT binary patch') or - line.startswith('Binary file')): - isbinary = True - addresult() - return results - -def diffstat(lines, width=80): - output = [] - stats = diffstatdata(lines) - maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats) - - countwidth = len(str(maxtotal)) - if hasbinary and countwidth < 3: - countwidth = 3 - graphwidth = width - countwidth - maxname - 6 - if graphwidth < 10: - graphwidth = 10 - - def scale(i): - if maxtotal <= graphwidth: - return i - # If diffstat runs out of room it doesn't print anything, - # which isn't very useful, so always print at least one + or - - # if there were at least some changes. - return max(i * graphwidth // maxtotal, int(bool(i))) - - for filename, adds, removes, isbinary in stats: - if isbinary: - count = 'Bin' - else: - count = '%d' % (adds + removes) - pluses = '+' * scale(adds) - minuses = '-' * scale(removes) - output.append(' %s%s | %*s %s%s\n' % - (filename, ' ' * (maxname - encoding.colwidth(filename)), - countwidth, count, pluses, minuses)) - - if stats: - output.append(_(' %d files changed, %d insertions(+), ' - '%d deletions(-)\n') - % (len(stats), totaladds, totalremoves)) - - return ''.join(output) - -def diffstatui(*args, **kw): - '''like diffstat(), but yields 2-tuples of (output, label) for - ui.write() - ''' - - for line in diffstat(*args, **kw).splitlines(): - if line and line[-1] in '+-': - name, graph = line.rsplit(' ', 1) - yield (name + ' ', '') - m = re.search(br'\++', graph) - if m: - yield (m.group(0), 'diffstat.inserted') - m = re.search(br'-+', graph) - if m: - yield (m.group(0), 'diffstat.deleted') - else: - yield (line, '') - yield ('\n', '') diff --git a/mercurial/dispatch.py b/mercurial/dispatch.py --- a/mercurial/dispatch.py +++ b/mercurial/dispatch.py @@ -83,20 +83,23 @@ class request(object): def run(): "run the command in sys.argv" - _initstdio() + initstdio() req = request(pycompat.sysargv[1:]) err = None try: - status = (dispatch(req) or 0) + status = dispatch(req) except error.StdioError as e: err = e status = -1 + + # In all cases we try to flush stdio streams. 
if util.safehasattr(req.ui, 'fout'): try: req.ui.fout.flush() except IOError as e: err = e status = -1 + if util.safehasattr(req.ui, 'ferr'): try: if err is not None and err.errno != errno.EPIPE: @@ -112,7 +115,7 @@ def run(): sys.exit(status & 255) if pycompat.ispy3: - def _initstdio(): + def initstdio(): pass def _silencestdio(): @@ -132,7 +135,7 @@ if pycompat.ispy3: except IOError: pass else: - def _initstdio(): + def initstdio(): for fp in (sys.stdin, sys.stdout, sys.stderr): procutil.setbinary(fp) @@ -172,7 +175,7 @@ def _formatargs(args): return ' '.join(procutil.shellquote(a) for a in args) def dispatch(req): - "run the command specified in req.args" + """run the command specified in req.args; returns an integer status code""" if req.ferr: ferr = req.ferr elif req.ui: @@ -205,9 +208,9 @@ def dispatch(req): msg = _formatargs(req.args) starttime = util.timer() - ret = None + ret = 1 # default of Python exit code on unhandled exception try: - ret = _runcatch(req) + ret = _runcatch(req) or 0 except error.ProgrammingError as inst: req.ui.warn(_('** ProgrammingError: %s\n') % inst) if inst.hint: @@ -236,7 +239,7 @@ def dispatch(req): req.ui.log('uiblocked', 'ui blocked ms', **pycompat.strkwargs(req.ui._blockedtimes)) req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n", - msg, ret or 0, duration) + msg, ret & 255, duration) try: req._runexithandlers() except: # exiting, so no re-raises @@ -285,8 +288,8 @@ def _runcatch(req): req.args[2] != 'serve' or req.args[3] != '--stdio'): raise error.Abort( - _('potentially unsafe serve --stdio invocation: %r') % - (req.args,)) + _('potentially unsafe serve --stdio invocation: %s') % + (stringutil.pprint(req.args),)) try: debugger = 'pdb' @@ -808,6 +811,13 @@ def _dispatch(req): if req.repo: uis.add(req.repo.ui) + if (req.earlyoptions['verbose'] or req.earlyoptions['debug'] + or req.earlyoptions['quiet']): + for opt in ('verbose', 'debug', 'quiet'): + val = pycompat.bytestr(bool(req.earlyoptions[opt])) + for ui_ in uis: + ui_.setconfig('ui', opt, val, '--' + opt) + if req.earlyoptions['profile']: for ui_ in uis: ui_.setconfig('profiling', 'enabled', 'true', '--profile') @@ -873,8 +883,11 @@ def _dispatch(req): if options["profile"]: profiler.start() + # if abbreviated version of this were used, take them in account, now if options['verbose'] or options['debug'] or options['quiet']: for opt in ('verbose', 'debug', 'quiet'): + if options[opt] == req.earlyoptions[opt]: + continue val = pycompat.bytestr(bool(options[opt])) for ui_ in uis: ui_.setconfig('ui', opt, val, '--' + opt) @@ -1025,7 +1038,7 @@ def _exceptionwarning(ui): '** which supports versions %s of Mercurial.\n' '** Please disable %s and try your action again.\n' '** If that fixes the bug please report it to %s\n') - % (name, testedwith, name, report)) + % (name, testedwith, name, stringutil.forcebytestr(report))) else: bugtracker = ui.config('ui', 'supportcontact') if bugtracker is None: diff --git a/mercurial/encoding.py b/mercurial/encoding.py --- a/mercurial/encoding.py +++ b/mercurial/encoding.py @@ -98,6 +98,16 @@ class localstr(bytes): def __hash__(self): return hash(self._utf8) # avoid collisions in local string space +class safelocalstr(bytes): + """Tagged string denoting it was previously an internal UTF-8 string, + and can be converted back to UTF-8 losslessly + + >>> assert safelocalstr(b'\\xc3') == b'\\xc3' + >>> assert b'\\xc3' == safelocalstr(b'\\xc3') + >>> assert b'\\xc3' in {safelocalstr(b'\\xc3'): 0} + >>> assert safelocalstr(b'\\xc3') in {b'\\xc3': 0} + """ + 
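# A reduced model of the safelocalstr tagging introduced above (the
# encoding name and helper below are illustrative): when the local
# re-encoding is provably lossless, return a bytes subclass so that a later
# conversion back to UTF-8 can trust the type instead of re-verifying.
class taggedbytes(bytes):
    """bytes known to convert back to UTF-8 without loss"""

def tolocalsketch(s, localenc='iso-8859-1'):
    u = s.decode('utf-8')
    r = u.encode(localenc, 'replace')
    if u == r.decode(localenc):
        return taggedbytes(r)   # lossless round-trip: record it in the type
    return r                    # lossy: callers must keep the original

assert isinstance(tolocalsketch(b'caf\xc3\xa9'), taggedbytes)
assert tolocalsketch(b'caf\xc3\xa9') == b'caf\xe9'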
def tolocal(s): """ Convert a string from internal UTF-8 to local encoding @@ -145,7 +155,7 @@ def tolocal(s): r = u.encode(_sysstr(encoding), u"replace") if u == r.decode(_sysstr(encoding)): # r is a safe, non-lossy encoding of s - return r + return safelocalstr(r) return localstr(s, r) except UnicodeDecodeError: # we should only get here if we're looking at an ancient changeset @@ -154,7 +164,7 @@ def tolocal(s): r = u.encode(_sysstr(encoding), u"replace") if u == r.decode(_sysstr(encoding)): # r is a safe, non-lossy encoding of s - return r + return safelocalstr(r) return localstr(u.encode('UTF-8'), r) except UnicodeDecodeError: u = s.decode("utf-8", "replace") # last ditch @@ -407,7 +417,7 @@ def jsonescape(s, paranoid=False): JSON is problematic for us because it doesn't support non-Unicode bytes. To deal with this, we take the following approach: - - localstr objects are converted back to UTF-8 + - localstr/safelocalstr objects are converted back to UTF-8 - valid UTF-8/ASCII strings are passed as-is - other strings are converted to UTF-8b surrogate encoding - apply JSON-specified string escaping @@ -500,6 +510,7 @@ def toutf8b(s): - local strings that have a cached known UTF-8 encoding (aka localstr) get sent as UTF-8 so Unicode-oriented clients get the Unicode data they want + - non-lossy local strings (aka safelocalstr) get sent as UTF-8 as well - because we must preserve UTF-8 bytestring in places such as filenames, metadata can't be roundtripped without help @@ -509,11 +520,17 @@ def toutf8b(s): internal surrogate encoding as a UTF-8 string.) ''' - if not isinstance(s, localstr) and isasciistr(s): + if isinstance(s, localstr): + # assume that the original UTF-8 sequence would never contain + # invalid characters in U+DCxx range + return s._utf8 + elif isinstance(s, safelocalstr): + # already verified that s is non-lossy in legacy encoding, which + # shouldn't contain characters in U+DCxx range + return fromlocal(s) + elif isasciistr(s): return s if "\xed" not in s: - if isinstance(s, localstr): - return s._utf8 try: s.decode('utf-8', _utf8strict) return s diff --git a/mercurial/error.py b/mercurial/error.py --- a/mercurial/error.py +++ b/mercurial/error.py @@ -241,7 +241,7 @@ class BundleUnknownFeatureError(BundleVa if val is None: entries.append(val) else: - entries.append("%s=%r" % (par, val)) + entries.append("%s=%r" % (par, pycompat.maybebytestr(val))) if entries: msg = '%s - %s' % (msg, ', '.join(entries)) ValueError.__init__(self, msg) diff --git a/mercurial/exchange.py b/mercurial/exchange.py --- a/mercurial/exchange.py +++ b/mercurial/exchange.py @@ -531,6 +531,9 @@ def push(repo, remote, force=False, revs _pushobsolete(pushop) _pushbookmark(pushop) + if repo.ui.configbool('experimental', 'remotenames'): + logexchange.pullremotenames(repo, remote) + return pushop # list of steps to perform discovery before push @@ -658,7 +661,7 @@ def _pushdiscoverybookmarks(pushop): ui.debug("checking for updated bookmarks\n") ancestors = () if pushop.revs: - revnums = map(repo.changelog.rev, pushop.revs) + revnums = pycompat.maplist(repo.changelog.rev, pushop.revs) ancestors = repo.changelog.ancestors(revnums, inclusive=True) remotebookmark = listkeys(remote, 'bookmarks') diff --git a/mercurial/extensions.py b/mercurial/extensions.py --- a/mercurial/extensions.py +++ b/mercurial/extensions.py @@ -7,6 +7,8 @@ from __future__ import absolute_import +import ast +import collections import functools import imp import inspect @@ -121,10 +123,11 @@ def _importext(name, path=None, reportfu def 
_reportimporterror(ui, err, failed, next): # note: this ui.debug happens before --debug is processed, # Use --config ui.debug=1 to see them. - ui.debug('could not import %s (%s): trying %s\n' - % (failed, stringutil.forcebytestr(err), next)) - if ui.debugflag: - ui.traceback() + if ui.configbool('devel', 'debug.extensions'): + ui.debug('could not import %s (%s): trying %s\n' + % (failed, stringutil.forcebytestr(err), next)) + if ui.debugflag: + ui.traceback() def _rejectunicode(name, xs): if isinstance(xs, (list, set, tuple)): @@ -145,9 +148,6 @@ def _validatecmdtable(ui, cmdtable): """Check if extension commands have required attributes""" for c, e in cmdtable.iteritems(): f = e[0] - if getattr(f, '_deprecatedregistrar', False): - ui.deprecwarn("cmdutil.command is deprecated, use " - "registrar.command to register '%s'" % c, '4.6') missing = [a for a in _cmdfuncattrs if not util.safehasattr(f, a)] if not missing: continue @@ -541,9 +541,8 @@ def getwrapperchain(container, funcname) fn = getattr(fn, '_origfunc', None) return result -def _disabledpaths(strip_init=False): - '''find paths of disabled extensions. returns a dict of {name: path} - removes /__init__.py from packages if strip_init is True''' +def _disabledpaths(): + '''find paths of disabled extensions. returns a dict of {name: path}''' import hgext extpath = os.path.dirname( os.path.abspath(pycompat.fsencode(hgext.__file__))) @@ -562,8 +561,6 @@ def _disabledpaths(strip_init=False): path = os.path.join(extpath, e, '__init__.py') if not os.path.exists(path): continue - if strip_init: - path = os.path.dirname(path) if name in exts or name in _order or name == '__init__': continue exts[name] = path @@ -609,12 +606,10 @@ def _moduledoc(file): def _disabledhelp(path): '''retrieve help synopsis of a disabled extension (without importing)''' try: - file = open(path) + with open(path, 'rb') as src: + doc = _moduledoc(src) except IOError: return - else: - doc = _moduledoc(file) - file.close() if doc: # extracting localized synopsis return gettext(doc) @@ -658,48 +653,82 @@ def disabledext(name): if name in paths: return _disabledhelp(paths[name]) +def _walkcommand(node): + """Scan @command() decorators in the tree starting at node""" + todo = collections.deque([node]) + while todo: + node = todo.popleft() + if not isinstance(node, ast.FunctionDef): + todo.extend(ast.iter_child_nodes(node)) + continue + for d in node.decorator_list: + if not isinstance(d, ast.Call): + continue + if not isinstance(d.func, ast.Name): + continue + if d.func.id != r'command': + continue + yield d + +def _disabledcmdtable(path): + """Construct a dummy command table without loading the extension module + + This may raise IOError or SyntaxError. 
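# _walkcommand()/_disabledcmdtable() above recover command names by parsing
# the extension source with the ast module instead of importing it. A
# self-contained sketch of that scan (SOURCE and every name here are
# hypothetical examples, not part of the patch):
import ast

SOURCE = (b"@command(b'frobnicate', [], b'hg frobnicate')\n"
          b"def frob(ui, repo):\n"
          b"    pass\n")

def scancommands(source):
    names = []
    for node in ast.walk(ast.parse(source)):
        if not isinstance(node, ast.FunctionDef):
            continue
        for d in node.decorator_list:
            if (isinstance(d, ast.Call) and isinstance(d.func, ast.Name)
                    and d.func.id == 'command' and d.args):
                a = d.args[0]
                if isinstance(a, ast.Str):       # str literal (Python 2)
                    names.append(a.s)
                elif hasattr(ast, 'Bytes') and isinstance(a, ast.Bytes):
                    names.append(a.s)            # bytes literal (Python 3)
    return names

assert scancommands(SOURCE) == [b'frobnicate']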
+ """ + with open(path, 'rb') as src: + root = ast.parse(src.read(), path) + cmdtable = {} + for node in _walkcommand(root): + if not node.args: + continue + a = node.args[0] + if isinstance(a, ast.Str): + name = pycompat.sysbytes(a.s) + elif pycompat.ispy3 and isinstance(a, ast.Bytes): + name = a.s + else: + continue + cmdtable[name] = (None, [], b'') + return cmdtable + +def _finddisabledcmd(ui, cmd, name, path, strict): + try: + cmdtable = _disabledcmdtable(path) + except (IOError, SyntaxError): + return + try: + aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict) + except (error.AmbiguousCommand, error.UnknownCommand): + return + for c in aliases: + if c.startswith(cmd): + cmd = c + break + else: + cmd = aliases[0] + doc = _disabledhelp(path) + return (cmd, name, doc) + def disabledcmd(ui, cmd, strict=False): - '''import disabled extensions until cmd is found. - returns (cmdname, extname, module)''' + '''find cmd from disabled extensions without importing. + returns (cmdname, extname, doc)''' - paths = _disabledpaths(strip_init=True) + paths = _disabledpaths() if not paths: raise error.UnknownCommand(cmd) - def findcmd(cmd, name, path): - try: - mod = loadpath(path, 'hgext.%s' % name) - except Exception: - return - try: - aliases, entry = cmdutil.findcmd(cmd, - getattr(mod, 'cmdtable', {}), strict) - except (error.AmbiguousCommand, error.UnknownCommand): - return - except Exception: - ui.warn(_('warning: error finding commands in %s\n') % path) - ui.traceback() - return - for c in aliases: - if c.startswith(cmd): - cmd = c - break - else: - cmd = aliases[0] - return (cmd, name, mod) - ext = None # first, search for an extension with the same name as the command path = paths.pop(cmd, None) if path: - ext = findcmd(cmd, cmd, path) + ext = _finddisabledcmd(ui, cmd, cmd, path, strict=strict) if not ext: # otherwise, interrogate each extension until there's a match for name, path in paths.iteritems(): - ext = findcmd(cmd, name, path) + ext = _finddisabledcmd(ui, cmd, name, path, strict=strict) if ext: break - if ext and 'DEPRECATED' not in ext.__doc__: + if ext: return ext raise error.UnknownCommand(cmd) @@ -729,7 +758,7 @@ def moduleversion(module): else: version = '' if isinstance(version, (list, tuple)): - version = '.'.join(str(o) for o in version) + version = '.'.join(pycompat.bytestr(o) for o in version) return version def ismoduleinternal(module): diff --git a/mercurial/filelog.py b/mercurial/filelog.py --- a/mercurial/filelog.py +++ b/mercurial/filelog.py @@ -215,12 +215,12 @@ class filelog(object): self._revlog._lazydeltabase = value @property - def _aggressivemergedeltas(self): - return self._revlog._aggressivemergedeltas + def _deltabothparents(self): + return self._revlog._deltabothparents - @_aggressivemergedeltas.setter - def _aggressivemergedeltas(self, value): - self._revlog._aggressivemergedeltas = value + @_deltabothparents.setter + def _deltabothparents(self, value): + self._revlog._deltabothparents = value @property def _inline(self): diff --git a/mercurial/filemerge.py b/mercurial/filemerge.py --- a/mercurial/filemerge.py +++ b/mercurial/filemerge.py @@ -11,7 +11,6 @@ import contextlib import os import re import shutil -import tempfile from .i18n import _ from .node import nullid, short @@ -114,8 +113,16 @@ class absentfilectx(object): def _findtool(ui, tool): if tool in internals: return tool + cmd = _toolstr(ui, tool, "executable", tool) + if cmd.startswith('python:'): + return cmd return findexternaltool(ui, tool) +def _quotetoolpath(cmd): + if 
cmd.startswith('python:'): + return cmd + return procutil.shellquote(cmd) + def findexternaltool(ui, tool): for kn in ("regkey", "regkeyalt"): k = _toolstr(ui, tool, kn) @@ -165,7 +172,7 @@ def _picktool(repo, ui, path, binary, sy return ":prompt", None else: if toolpath: - return (force, procutil.shellquote(toolpath)) + return (force, _quotetoolpath(toolpath)) else: # mimic HGMERGE if given tool not found return (force, force) @@ -183,7 +190,7 @@ def _picktool(repo, ui, path, binary, sy mf = match.match(repo.root, '', [pat]) if mf(path) and check(tool, pat, symlink, False, changedelete): toolpath = _findtool(ui, tool) - return (tool, procutil.shellquote(toolpath)) + return (tool, _quotetoolpath(toolpath)) # then merge tools tools = {} @@ -208,7 +215,7 @@ def _picktool(repo, ui, path, binary, sy for p, t in tools: if check(t, None, symlink, binary, changedelete): toolpath = _findtool(ui, t) - return (t, procutil.shellquote(toolpath)) + return (t, _quotetoolpath(toolpath)) # internal merge or prompt as last resort if symlink or binary or changedelete: @@ -325,7 +332,7 @@ def _underlyingfctxifabsent(filectx): return filectx def _premerge(repo, fcd, fco, fca, toolconf, files, labels=None): - tool, toolpath, binary, symlink = toolconf + tool, toolpath, binary, symlink, scriptfn = toolconf if symlink or fcd.isabsent() or fco.isabsent(): return 1 unused, unused, unused, back = files @@ -361,7 +368,7 @@ def _premerge(repo, fcd, fco, fca, toolc return 1 # continue merging def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf): - tool, toolpath, binary, symlink = toolconf + tool, toolpath, binary, symlink, scriptfn = toolconf if symlink: repo.ui.warn(_('warning: internal %s cannot merge symlinks ' 'for %s\n') % (tool, fcd.path())) @@ -430,7 +437,7 @@ def _imergeauto(repo, mynode, orig, fcd, Generic driver for _imergelocal and _imergeother """ assert localorother is not None - tool, toolpath, binary, symlink = toolconf + tool, toolpath, binary, symlink, scriptfn = toolconf r = simplemerge.simplemerge(repo.ui, fcd, fca, fco, label=labels, localorother=localorother) return True, r @@ -510,7 +517,7 @@ def _xmergeimm(repo, mynode, orig, fcd, 'external merge tools') def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): - tool, toolpath, binary, symlink = toolconf + tool, toolpath, binary, symlink, scriptfn = toolconf if fcd.isabsent() or fco.isabsent(): repo.ui.warn(_('warning: %s cannot merge change/delete conflict ' 'for %s\n') % (tool, fcd.path())) @@ -551,12 +558,36 @@ def _xmerge(repo, mynode, orig, fcd, fco args = util.interpolate( br'\$', replace, args, lambda s: procutil.shellquote(util.localpath(s))) - cmd = toolpath + ' ' + args if _toolbool(ui, tool, "gui"): repo.ui.status(_('running merge tool %s for file %s\n') % (tool, fcd.path())) - repo.ui.debug('launching merge tool: %s\n' % cmd) - r = ui.system(cmd, cwd=repo.root, environ=env, blockedtag='mergetool') + if scriptfn is None: + cmd = toolpath + ' ' + args + repo.ui.debug('launching merge tool: %s\n' % cmd) + r = ui.system(cmd, cwd=repo.root, environ=env, + blockedtag='mergetool') + else: + repo.ui.debug('launching python merge script: %s:%s\n' % + (toolpath, scriptfn)) + r = 0 + try: + # avoid cycle cmdutil->merge->filemerge->extensions->cmdutil + from . 
import extensions + mod = extensions.loadpath(toolpath, 'hgmerge.%s' % tool) + except Exception: + raise error.Abort(_("loading python merge script failed: %s") % + toolpath) + mergefn = getattr(mod, scriptfn, None) + if mergefn is None: + raise error.Abort(_("%s does not have function: %s") % + (toolpath, scriptfn)) + argslist = procutil.shellsplit(args) + # avoid cycle cmdutil->merge->filemerge->hook->extensions->cmdutil + from . import hook + ret, raised = hook.pythonhook(ui, repo, "merge", toolpath, + mergefn, {'args': argslist}, True) + if raised: + r = 1 repo.ui.debug('merge tool returned: %d\n' % r) return True, r, False @@ -681,7 +712,7 @@ def _maketempfiles(repo, fco, fca, local tmproot = None tmprootprefix = repo.ui.config('experimental', 'mergetempdirprefix') if tmprootprefix: - tmproot = tempfile.mkdtemp(prefix=tmprootprefix) + tmproot = pycompat.mkdtemp(prefix=tmprootprefix) def maketempfrompath(prefix, path): fullbase, ext = os.path.splitext(path) @@ -692,7 +723,7 @@ def _maketempfiles(repo, fco, fca, local name += ext f = open(name, r"wb") else: - fd, name = tempfile.mkstemp(prefix=pre + '.', suffix=ext) + fd, name = pycompat.mkstemp(prefix=pre + '.', suffix=ext) f = os.fdopen(fd, r"wb") return f, name @@ -751,9 +782,24 @@ def _filemerge(premerge, repo, wctx, myn symlink = 'l' in fcd.flags() + fco.flags() changedelete = fcd.isabsent() or fco.isabsent() tool, toolpath = _picktool(repo, ui, fd, binary, symlink, changedelete) + scriptfn = None if tool in internals and tool.startswith('internal:'): # normalize to new-style names (':merge' etc) tool = tool[len('internal'):] + if toolpath and toolpath.startswith('python:'): + invalidsyntax = False + if toolpath.count(':') >= 2: + script, scriptfn = toolpath[7:].rsplit(':', 1) + if not scriptfn: + invalidsyntax = True + # missing :callable can lead to spliting on windows drive letter + if '\\' in scriptfn or '/' in scriptfn: + invalidsyntax = True + else: + invalidsyntax = True + if invalidsyntax: + raise error.Abort(_("invalid 'python:' syntax: %s") % toolpath) + toolpath = script ui.debug("picked tool '%s' for %s (binary %s symlink %s changedelete %s)\n" % (tool, fd, pycompat.bytestr(binary), pycompat.bytestr(symlink), pycompat.bytestr(changedelete))) @@ -774,7 +820,7 @@ def _filemerge(premerge, repo, wctx, myn precheck = None isexternal = True - toolconf = tool, toolpath, binary, symlink + toolconf = tool, toolpath, binary, symlink, scriptfn if mergetype == nomerge: r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf, labels) diff --git a/mercurial/fileset.py b/mercurial/fileset.py --- a/mercurial/fileset.py +++ b/mercurial/fileset.py @@ -7,6 +7,7 @@ from __future__ import absolute_import +import errno import re from .i18n import _ @@ -126,148 +127,6 @@ def getpattern(x, allkinds, err): return _getkindpat(x[1], x[2], allkinds, err) return getstring(x, err) -def getset(mctx, x): - if not x: - raise error.ParseError(_("missing argument")) - return methods[x[0]](mctx, *x[1:]) - -def stringset(mctx, x): - m = mctx.matcher([x]) - return [f for f in mctx.subset if m(f)] - -def kindpatset(mctx, x, y): - return stringset(mctx, _getkindpat(x, y, matchmod.allpatternkinds, - _("pattern must be a string"))) - -def andset(mctx, x, y): - return getset(mctx.narrow(getset(mctx, x)), y) - -def orset(mctx, x, y): - # needs optimizing - xl = getset(mctx, x) - yl = getset(mctx, y) - return xl + [f for f in yl if f not in xl] - -def notset(mctx, x): - s = set(getset(mctx, x)) - return [r for r in mctx.subset if r not in s] - -def 
minusset(mctx, x, y): - xl = getset(mctx, x) - yl = set(getset(mctx, y)) - return [f for f in xl if f not in yl] - -def negateset(mctx, x): - raise error.ParseError(_("can't use negate operator in this context")) - -def listset(mctx, a, b): - raise error.ParseError(_("can't use a list in this context"), - hint=_('see hg help "filesets.x or y"')) - -# symbols are callable like: -# fun(mctx, x) -# with: -# mctx - current matchctx instance -# x - argument in tree form -symbols = {} - -# filesets using matchctx.status() -_statuscallers = set() - -# filesets using matchctx.existing() -_existingcallers = set() - -predicate = registrar.filesetpredicate() - -@predicate('modified()', callstatus=True) -def modified(mctx, x): - """File that is modified according to :hg:`status`. - """ - # i18n: "modified" is a keyword - getargs(x, 0, 0, _("modified takes no arguments")) - s = set(mctx.status().modified) - return [f for f in mctx.subset if f in s] - -@predicate('added()', callstatus=True) -def added(mctx, x): - """File that is added according to :hg:`status`. - """ - # i18n: "added" is a keyword - getargs(x, 0, 0, _("added takes no arguments")) - s = set(mctx.status().added) - return [f for f in mctx.subset if f in s] - -@predicate('removed()', callstatus=True) -def removed(mctx, x): - """File that is removed according to :hg:`status`. - """ - # i18n: "removed" is a keyword - getargs(x, 0, 0, _("removed takes no arguments")) - s = set(mctx.status().removed) - return [f for f in mctx.subset if f in s] - -@predicate('deleted()', callstatus=True) -def deleted(mctx, x): - """Alias for ``missing()``. - """ - # i18n: "deleted" is a keyword - getargs(x, 0, 0, _("deleted takes no arguments")) - s = set(mctx.status().deleted) - return [f for f in mctx.subset if f in s] - -@predicate('missing()', callstatus=True) -def missing(mctx, x): - """File that is missing according to :hg:`status`. - """ - # i18n: "missing" is a keyword - getargs(x, 0, 0, _("missing takes no arguments")) - s = set(mctx.status().deleted) - return [f for f in mctx.subset if f in s] - -@predicate('unknown()', callstatus=True) -def unknown(mctx, x): - """File that is unknown according to :hg:`status`. These files will only be - considered if this predicate is used. - """ - # i18n: "unknown" is a keyword - getargs(x, 0, 0, _("unknown takes no arguments")) - s = set(mctx.status().unknown) - return [f for f in mctx.subset if f in s] - -@predicate('ignored()', callstatus=True) -def ignored(mctx, x): - """File that is ignored according to :hg:`status`. These files will only be - considered if this predicate is used. - """ - # i18n: "ignored" is a keyword - getargs(x, 0, 0, _("ignored takes no arguments")) - s = set(mctx.status().ignored) - return [f for f in mctx.subset if f in s] - -@predicate('clean()', callstatus=True) -def clean(mctx, x): - """File that is clean according to :hg:`status`. 
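# The combinators removed above evaluated filesets eagerly to Python lists;
# their set semantics, reduced to pure functions (illustrative names):
def orlist(xl, yl):
    return xl + [f for f in yl if f not in xl]   # keep xl's order, dedupe

def minuslist(xl, yl):
    ys = set(yl)
    return [f for f in xl if f not in ys]

assert orlist(['a', 'b'], ['b', 'c']) == ['a', 'b', 'c']
assert minuslist(['a', 'b'], ['b']) == ['a']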
- """ - # i18n: "clean" is a keyword - getargs(x, 0, 0, _("clean takes no arguments")) - s = set(mctx.status().clean) - return [f for f in mctx.subset if f in s] - -def func(mctx, a, b): - funcname = getsymbol(a) - if funcname in symbols: - enabled = mctx._existingenabled - mctx._existingenabled = funcname in _existingcallers - try: - return symbols[funcname](mctx, b) - finally: - mctx._existingenabled = enabled - - keep = lambda fn: getattr(fn, '__doc__', None) is not None - - syms = [s for (s, fn) in symbols.items() if keep(fn)] - raise error.UnknownIdentifier(funcname, syms) - def getlist(x): if not x: return [] @@ -281,29 +140,169 @@ def getargs(x, min, max, err): raise error.ParseError(err) return l -@predicate('binary()', callexisting=True) +def getmatch(mctx, x): + if not x: + raise error.ParseError(_("missing argument")) + return methods[x[0]](mctx, *x[1:]) + +def stringmatch(mctx, x): + return mctx.matcher([x]) + +def kindpatmatch(mctx, x, y): + return stringmatch(mctx, _getkindpat(x, y, matchmod.allpatternkinds, + _("pattern must be a string"))) + +def andmatch(mctx, x, y): + xm = getmatch(mctx, x) + ym = getmatch(mctx, y) + return matchmod.intersectmatchers(xm, ym) + +def ormatch(mctx, x, y): + xm = getmatch(mctx, x) + ym = getmatch(mctx, y) + return matchmod.unionmatcher([xm, ym]) + +def notmatch(mctx, x): + m = getmatch(mctx, x) + return mctx.predicate(lambda f: not m(f), predrepr=('', m)) + +def minusmatch(mctx, x, y): + xm = getmatch(mctx, x) + ym = getmatch(mctx, y) + return matchmod.differencematcher(xm, ym) + +def negatematch(mctx, x): + raise error.ParseError(_("can't use negate operator in this context")) + +def listmatch(mctx, x, y): + raise error.ParseError(_("can't use a list in this context"), + hint=_('see hg help "filesets.x or y"')) + +def func(mctx, a, b): + funcname = getsymbol(a) + if funcname in symbols: + return symbols[funcname](mctx, b) + + keep = lambda fn: getattr(fn, '__doc__', None) is not None + + syms = [s for (s, fn) in symbols.items() if keep(fn)] + raise error.UnknownIdentifier(funcname, syms) + +# symbols are callable like: +# fun(mctx, x) +# with: +# mctx - current matchctx instance +# x - argument in tree form +symbols = {} + +# filesets using matchctx.status() +_statuscallers = set() + +predicate = registrar.filesetpredicate() + +@predicate('modified()', callstatus=True) +def modified(mctx, x): + """File that is modified according to :hg:`status`. + """ + # i18n: "modified" is a keyword + getargs(x, 0, 0, _("modified takes no arguments")) + s = set(mctx.status().modified) + return mctx.predicate(s.__contains__, predrepr='modified') + +@predicate('added()', callstatus=True) +def added(mctx, x): + """File that is added according to :hg:`status`. + """ + # i18n: "added" is a keyword + getargs(x, 0, 0, _("added takes no arguments")) + s = set(mctx.status().added) + return mctx.predicate(s.__contains__, predrepr='added') + +@predicate('removed()', callstatus=True) +def removed(mctx, x): + """File that is removed according to :hg:`status`. + """ + # i18n: "removed" is a keyword + getargs(x, 0, 0, _("removed takes no arguments")) + s = set(mctx.status().removed) + return mctx.predicate(s.__contains__, predrepr='removed') + +@predicate('deleted()', callstatus=True) +def deleted(mctx, x): + """Alias for ``missing()``. 
+ """ + # i18n: "deleted" is a keyword + getargs(x, 0, 0, _("deleted takes no arguments")) + s = set(mctx.status().deleted) + return mctx.predicate(s.__contains__, predrepr='deleted') + +@predicate('missing()', callstatus=True) +def missing(mctx, x): + """File that is missing according to :hg:`status`. + """ + # i18n: "missing" is a keyword + getargs(x, 0, 0, _("missing takes no arguments")) + s = set(mctx.status().deleted) + return mctx.predicate(s.__contains__, predrepr='deleted') + +@predicate('unknown()', callstatus=True) +def unknown(mctx, x): + """File that is unknown according to :hg:`status`.""" + # i18n: "unknown" is a keyword + getargs(x, 0, 0, _("unknown takes no arguments")) + s = set(mctx.status().unknown) + return mctx.predicate(s.__contains__, predrepr='unknown') + +@predicate('ignored()', callstatus=True) +def ignored(mctx, x): + """File that is ignored according to :hg:`status`.""" + # i18n: "ignored" is a keyword + getargs(x, 0, 0, _("ignored takes no arguments")) + s = set(mctx.status().ignored) + return mctx.predicate(s.__contains__, predrepr='ignored') + +@predicate('clean()', callstatus=True) +def clean(mctx, x): + """File that is clean according to :hg:`status`. + """ + # i18n: "clean" is a keyword + getargs(x, 0, 0, _("clean takes no arguments")) + s = set(mctx.status().clean) + return mctx.predicate(s.__contains__, predrepr='clean') + +@predicate('tracked()') +def tracked(mctx, x): + """File that is under Mercurial control.""" + # i18n: "tracked" is a keyword + getargs(x, 0, 0, _("tracked takes no arguments")) + return mctx.predicate(mctx.ctx.__contains__, predrepr='tracked') + +@predicate('binary()') def binary(mctx, x): """File that appears to be binary (contains NUL bytes). """ # i18n: "binary" is a keyword getargs(x, 0, 0, _("binary takes no arguments")) - return [f for f in mctx.existing() if mctx.ctx[f].isbinary()] + return mctx.fpredicate(lambda fctx: fctx.isbinary(), + predrepr='binary', cache=True) -@predicate('exec()', callexisting=True) +@predicate('exec()') def exec_(mctx, x): """File that is marked as executable. """ # i18n: "exec" is a keyword getargs(x, 0, 0, _("exec takes no arguments")) - return [f for f in mctx.existing() if mctx.ctx.flags(f) == 'x'] + ctx = mctx.ctx + return mctx.predicate(lambda f: ctx.flags(f) == 'x', predrepr='exec') -@predicate('symlink()', callexisting=True) +@predicate('symlink()') def symlink(mctx, x): """File that is marked as a symlink. 
""" # i18n: "symlink" is a keyword getargs(x, 0, 0, _("symlink takes no arguments")) - return [f for f in mctx.existing() if mctx.ctx.flags(f) == 'l'] + ctx = mctx.ctx + return mctx.predicate(lambda f: ctx.flags(f) == 'l', predrepr='symlink') @predicate('resolved()') def resolved(mctx, x): @@ -312,9 +311,10 @@ def resolved(mctx, x): # i18n: "resolved" is a keyword getargs(x, 0, 0, _("resolved takes no arguments")) if mctx.ctx.rev() is not None: - return [] + return mctx.never() ms = merge.mergestate.read(mctx.ctx.repo()) - return [f for f in mctx.subset if f in ms and ms[f] == 'r'] + return mctx.predicate(lambda f: f in ms and ms[f] == 'r', + predrepr='resolved') @predicate('unresolved()') def unresolved(mctx, x): @@ -323,9 +323,10 @@ def unresolved(mctx, x): # i18n: "unresolved" is a keyword getargs(x, 0, 0, _("unresolved takes no arguments")) if mctx.ctx.rev() is not None: - return [] + return mctx.never() ms = merge.mergestate.read(mctx.ctx.repo()) - return [f for f in mctx.subset if f in ms and ms[f] == 'u'] + return mctx.predicate(lambda f: f in ms and ms[f] == 'u', + predrepr='unresolved') @predicate('hgignore()') def hgignore(mctx, x): @@ -333,8 +334,7 @@ def hgignore(mctx, x): """ # i18n: "hgignore" is a keyword getargs(x, 0, 0, _("hgignore takes no arguments")) - ignore = mctx.ctx.repo().dirstate._ignore - return [f for f in mctx.subset if ignore(f)] + return mctx.ctx.repo().dirstate._ignore @predicate('portable()') def portable(mctx, x): @@ -343,10 +343,10 @@ def portable(mctx, x): """ # i18n: "portable" is a keyword getargs(x, 0, 0, _("portable takes no arguments")) - checkwinfilename = util.checkwinfilename - return [f for f in mctx.subset if checkwinfilename(f) is None] + return mctx.predicate(lambda f: util.checkwinfilename(f) is None, + predrepr='portable') -@predicate('grep(regex)', callexisting=True) +@predicate('grep(regex)') def grep(mctx, x): """File contains the given regular expression. """ @@ -354,8 +354,10 @@ def grep(mctx, x): # i18n: "grep" is a keyword r = re.compile(getstring(x, _("grep requires a pattern"))) except re.error as e: - raise error.ParseError(_('invalid match pattern: %s') % e) - return [f for f in mctx.existing() if r.search(mctx.ctx[f].data())] + raise error.ParseError(_('invalid match pattern: %s') % + stringutil.forcebytestr(e)) + return mctx.fpredicate(lambda fctx: r.search(fctx.data()), + predrepr=('grep(%r)', r.pattern), cache=True) def _sizetomax(s): try: @@ -373,11 +375,9 @@ def _sizetomax(s): except ValueError: raise error.ParseError(_("couldn't parse size: %s") % s) -def sizematcher(x): +def sizematcher(expr): """Return a function(size) -> bool from the ``size()`` expression""" - - # i18n: "size" is a keyword - expr = getstring(x, _("size requires an expression")).strip() + expr = expr.strip() if '-' in expr: # do we have a range? a, b = expr.split('-', 1) a = util.sizetoint(a) @@ -400,7 +400,7 @@ def sizematcher(x): b = _sizetomax(expr) return lambda x: x >= a and x <= b -@predicate('size(expression)', callexisting=True) +@predicate('size(expression)') def size(mctx, x): """File size matches the given expression. 
Examples: @@ -409,10 +409,13 @@ def size(mctx, x): - size('>= .5MB') - files at least 524288 bytes - size('4k - 1MB') - files from 4096 bytes to 1048576 bytes """ - m = sizematcher(x) - return [f for f in mctx.existing() if m(mctx.ctx[f].size())] + # i18n: "size" is a keyword + expr = getstring(x, _("size requires an expression")) + m = sizematcher(expr) + return mctx.fpredicate(lambda fctx: m(fctx.size()), + predrepr=('size(%r)', expr), cache=True) -@predicate('encoding(name)', callexisting=True) +@predicate('encoding(name)') def encoding(mctx, x): """File can be successfully decoded with the given character encoding. May not be useful for encodings other than ASCII and @@ -422,20 +425,19 @@ def encoding(mctx, x): # i18n: "encoding" is a keyword enc = getstring(x, _("encoding requires an encoding name")) - s = [] - for f in mctx.existing(): - d = mctx.ctx[f].data() + def encp(fctx): + d = fctx.data() try: - d.decode(enc) + d.decode(pycompat.sysstr(enc)) + return True except LookupError: raise error.Abort(_("unknown encoding '%s'") % enc) except UnicodeDecodeError: - continue - s.append(f) + return False - return s + return mctx.fpredicate(encp, predrepr=('encoding(%r)', enc), cache=True) -@predicate('eol(style)', callexisting=True) +@predicate('eol(style)') def eol(mctx, x): """File contains newlines of the given style (dos, unix, mac). Binary files are excluded, files with mixed line endings match multiple @@ -445,18 +447,18 @@ def eol(mctx, x): # i18n: "eol" is a keyword enc = getstring(x, _("eol requires a style name")) - s = [] - for f in mctx.existing(): - d = mctx.ctx[f].data() - if stringutil.binary(d): - continue + def eolp(fctx): + if fctx.isbinary(): + return False + d = fctx.data() if (enc == 'dos' or enc == 'win') and '\r\n' in d: - s.append(f) + return True elif enc == 'unix' and re.search('(? 
revcount + 1: - break - entry = webutil.changelistentry(web, web.repo[rev]) - entry['parity'] = next(parity) + for entry in webutil.changelistentries(web, revs, revcount, parity): yield entry if shortlog: @@ -448,9 +441,9 @@ def changelog(web, shortlog=False): rev=pos, symrev=symrev, changesets=count, - entries=entries, - latestentry=latestentry, - nextentry=nextentry, + entries=templateutil.mappinglist(entries), + latestentry=templateutil.mappinglist(latestentry), + nextentry=templateutil.mappinglist(nextentry), archives=web.archivelist('tip'), revcount=revcount, morevars=morevars, @@ -563,7 +556,7 @@ def manifest(web): if mf and not files and not dirs: raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path) - def filelist(**map): + def filelist(context): for f in sorted(files): full = files[f] @@ -575,7 +568,7 @@ def manifest(web): "size": fctx.size(), "permissions": mf.flags(full)} - def dirlist(**map): + def dirlist(context): for d in sorted(dirs): emptydirs = [] @@ -598,8 +591,8 @@ def manifest(web): path=abspath, up=webutil.up(abspath), upparity=next(parity), - fentries=filelist, - dentries=dirlist, + fentries=templateutil.mappinggenerator(filelist), + dentries=templateutil.mappinggenerator(dirlist), archives=web.archivelist(hex(node)), **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))) @@ -618,7 +611,7 @@ def tags(web): i = list(reversed(web.repo.tagslist())) parity = paritygen(web.stripecount) - def entries(notip, latestonly, **map): + def entries(context, notip, latestonly): t = i if notip: t = [(k, n) for k, n in i if k != "tip"] @@ -633,9 +626,10 @@ def tags(web): return web.sendtemplate( 'tags', node=hex(web.repo.changelog.tip()), - entries=lambda **x: entries(False, False, **x), - entriesnotip=lambda **x: entries(True, False, **x), - latestentry=lambda **x: entries(True, True, **x)) + entries=templateutil.mappinggenerator(entries, args=(False, False)), + entriesnotip=templateutil.mappinggenerator(entries, + args=(True, False)), + latestentry=templateutil.mappinggenerator(entries, args=(True, True))) @webcommand('bookmarks') def bookmarks(web): @@ -654,7 +648,7 @@ def bookmarks(web): i = sorted(i, key=sortkey, reverse=True) parity = paritygen(web.stripecount) - def entries(latestonly, **map): + def entries(context, latestonly): t = i if latestonly: t = i[:1] @@ -668,13 +662,14 @@ def bookmarks(web): latestrev = i[0][1] else: latestrev = -1 + lastdate = web.repo[latestrev].date() return web.sendtemplate( 'bookmarks', node=hex(web.repo.changelog.tip()), - lastchange=[{'date': web.repo[latestrev].date()}], - entries=lambda **x: entries(latestonly=False, **x), - latestentry=lambda **x: entries(latestonly=True, **x)) + lastchange=templateutil.mappinglist([{'date': lastdate}]), + entries=templateutil.mappinggenerator(entries, args=(False,)), + latestentry=templateutil.mappinggenerator(entries, args=(True,))) @webcommand('branches') def branches(web): @@ -732,7 +727,7 @@ def summary(web): 'date': web.repo[n].date(), } - def bookmarks(**map): + def bookmarks(context): parity = paritygen(web.stripecount) marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo] sortkey = lambda b: (web.repo[b[1]].rev(), b[0]) @@ -774,7 +769,7 @@ def summary(web): owner=get_contact(web.config) or 'unknown', lastchange=tip.date(), tags=templateutil.mappinggenerator(tagentries, name='tagentry'), - bookmarks=bookmarks, + bookmarks=templateutil.mappinggenerator(bookmarks), branches=webutil.branchentries(web.repo, web.stripecount, 10), 
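# The conversion running through this file replaces bare generator lambdas
# with wrapper objects that the template engine drives explicitly. A
# reduced model of that wrapper (a simplified stand-in, not templateutil's
# real class):
class mappinggeneratorsketch(object):
    def __init__(self, make, args=()):
        self._make = make
        self._args = args
    def itermaps(self, context):
        # re-runnable: each render gets a fresh generator with its context
        return self._make(context, *self._args)

def entries(context, latestonly):
    data = [{'topic': b'tags'}, {'topic': b'branches'}]
    for item in (data[:1] if latestonly else data):
        yield item

latest = mappinggeneratorsketch(entries, args=(True,))
assert list(latest.itermaps(context=None)) == [{'topic': b'tags'}]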
shortlog=templateutil.mappinggenerator(changelist, name='shortlogentry'), @@ -819,7 +814,7 @@ def filediff(web): rename = webutil.renamelink(fctx) ctx = fctx else: - rename = [] + rename = templateutil.mappinglist([]) ctx = ctx return web.sendtemplate( @@ -887,12 +882,12 @@ def comparison(web): pfctx = ctx.parents()[0][path] leftlines = filelines(pfctx) - comparison = webutil.compare(web.tmpl, context, leftlines, rightlines) + comparison = webutil.compare(context, leftlines, rightlines) if fctx is not None: rename = webutil.renamelink(fctx) ctx = fctx else: - rename = [] + rename = templateutil.mappinglist([]) ctx = ctx return web.sendtemplate( @@ -934,7 +929,7 @@ def annotate(web): # TODO there are still redundant operations within basefilectx.parents() # and from the fctx.annotate() call itself that could be cached. parentscache = {} - def parents(f): + def parents(context, f): rev = f.rev() if rev not in parentscache: parentscache[rev] = [] @@ -948,7 +943,7 @@ def annotate(web): for p in parentscache[rev]: yield p - def annotate(**map): + def annotate(context): if fctx.isbinary(): mt = (mimetypes.guess_type(fctx.path())[0] or 'application/octet-stream') @@ -972,7 +967,7 @@ def annotate(web): "node": f.hex(), "rev": rev, "author": f.user(), - "parents": parents(f), + "parents": templateutil.mappinggenerator(parents, args=(f,)), "desc": f.description(), "extra": f.extra(), "file": f.path(), @@ -991,13 +986,13 @@ def annotate(web): return web.sendtemplate( 'fileannotate', file=f, - annotate=annotate, + annotate=templateutil.mappinggenerator(annotate), path=webutil.up(f), symrev=webutil.symrevorshortnode(web.req, fctx), rename=webutil.renamelink(fctx), permissions=fctx.manifest().flags(f), ishead=int(ishead), - diffopts=diffopts, + diffopts=templateutil.hybriddict(diffopts), **pycompat.strkwargs(webutil.commonentry(web.repo, fctx))) @webcommand('filelog') @@ -1095,13 +1090,16 @@ def filelog(web): diffs = diff(c, linerange=lr) # follow renames accross filtered (not in range) revisions path = c.path() - entries.append(dict( - parity=next(parity), - filerev=c.rev(), - file=path, - diff=diffs, - linerange=webutil.formatlinerange(*lr), - **pycompat.strkwargs(webutil.commonentry(repo, c)))) + lm = webutil.commonentry(repo, c) + lm.update({ + 'parity': next(parity), + 'filerev': c.rev(), + 'file': path, + 'diff': diffs, + 'linerange': webutil.formatlinerange(*lr), + 'rename': templateutil.mappinglist([]), + }) + entries.append(lm) if i == revcount: break lessvars['linerange'] = webutil.formatlinerange(*lrange) @@ -1112,13 +1110,15 @@ def filelog(web): diffs = None if patch: diffs = diff(iterfctx) - entries.append(dict( - parity=next(parity), - filerev=i, - file=f, - diff=diffs, - rename=webutil.renamelink(iterfctx), - **pycompat.strkwargs(webutil.commonentry(repo, iterfctx)))) + lm = webutil.commonentry(repo, iterfctx) + lm.update({ + 'parity': next(parity), + 'filerev': i, + 'file': f, + 'diff': diffs, + 'rename': webutil.renamelink(iterfctx), + }) + entries.append(lm) entries.reverse() revnav = webutil.filerevnav(web.repo, fctx.path()) nav = revnav.gen(end - 1, revcount, count) @@ -1130,10 +1130,10 @@ def filelog(web): file=f, nav=nav, symrev=webutil.symrevorshortnode(web.req, fctx), - entries=entries, + entries=templateutil.mappinglist(entries), descend=descend, patch=patch, - latestentry=latestentry, + latestentry=templateutil.mappinglist(latestentry), linerange=linerange, revcount=revcount, morevars=morevars, @@ -1162,7 +1162,7 @@ def archive(web): """ type_ = web.req.qsparams.get('type') - 
allowed = web.configlist("web", "allow_archive") + allowed = web.configlist("web", "allow-archive") key = web.req.qsparams['node'] if type_ not in webutil.archivespecs: @@ -1314,24 +1314,6 @@ def graph(web): tree = list(item for item in graphmod.colored(dag, web.repo) if item[1] == graphmod.CHANGESET) - def nodecurrent(ctx): - wpnodes = web.repo.dirstate.parents() - if wpnodes[1] == nullid: - wpnodes = wpnodes[:1] - if ctx.node() in wpnodes: - return '@' - return '' - - def nodesymbol(ctx): - if ctx.obsolete(): - return 'x' - elif ctx.isunstable(): - return '*' - elif ctx.closesbranch(): - return '_' - else: - return 'o' - def fulltree(): pos = web.repo[graphtop].rev() tree = [] @@ -1342,14 +1324,14 @@ def graph(web): if item[1] == graphmod.CHANGESET) return tree - def jsdata(): - return [{'node': pycompat.bytestr(ctx), - 'graphnode': nodecurrent(ctx) + nodesymbol(ctx), - 'vertex': vtx, - 'edges': edges} - for (id, type, ctx, vtx, edges) in fulltree()] + def jsdata(context): + for (id, type, ctx, vtx, edges) in fulltree(): + yield {'node': pycompat.bytestr(ctx), + 'graphnode': webutil.getgraphnode(web.repo, ctx), + 'vertex': vtx, + 'edges': edges} - def nodes(): + def nodes(context): parity = paritygen(web.stripecount) for row, (id, type, ctx, vtx, edges) in enumerate(tree): entry = webutil.commonentry(web.repo, ctx) @@ -1363,7 +1345,7 @@ def graph(web): entry.update({'col': vtx[0], 'color': (vtx[1] - 1) % 6 + 1, 'parity': next(parity), - 'edges': edgedata, + 'edges': templateutil.mappinglist(edgedata), 'row': row, 'nextrow': row + 1}) @@ -1384,10 +1366,11 @@ def graph(web): rows=rows, bg_height=bg_height, changesets=count, - nextentry=nextentry, - jsdata=lambda **x: jsdata(), - nodes=lambda **x: nodes(), + nextentry=templateutil.mappinglist(nextentry), + jsdata=templateutil.mappinggenerator(jsdata), + nodes=templateutil.mappinggenerator(nodes), node=ctx.hex(), + archives=web.archivelist('tip'), changenav=changenav) def _getdoc(e): @@ -1417,7 +1400,7 @@ def help(web): topicname = web.req.qsparams.get('node') if not topicname: - def topics(**map): + def topics(context): for entries, summary, _doc in helpmod.helptable: yield {'topic': entries[0], 'summary': summary} @@ -1436,19 +1419,19 @@ def help(web): early.sort() other.sort() - def earlycommands(**map): + def earlycommands(context): for c, doc in early: yield {'topic': c, 'summary': doc} - def othercommands(**map): + def othercommands(context): for c, doc in other: yield {'topic': c, 'summary': doc} return web.sendtemplate( 'helptopics', - topics=topics, - earlycommands=earlycommands, - othercommands=othercommands, + topics=templateutil.mappinggenerator(topics), + earlycommands=templateutil.mappinggenerator(earlycommands), + othercommands=templateutil.mappinggenerator(othercommands), title='Index') # Render an index of sub-topics. @@ -1463,7 +1446,7 @@ def help(web): return web.sendtemplate( 'helptopics', - topics=topics, + topics=templateutil.mappinglist(topics), title=topicname, subindex=True) diff --git a/mercurial/hgweb/webutil.py b/mercurial/hgweb/webutil.py --- a/mercurial/hgweb/webutil.py +++ b/mercurial/hgweb/webutil.py @@ -25,6 +25,7 @@ from .common import ( from .. 
import ( context, + diffutil, error, match, mdiff, @@ -51,7 +52,7 @@ archivespecs = util.sortdict(( )) def archivelist(ui, nodeid, url=None): - allowed = ui.configlist('web', 'allow_archive', untrusted=True) + allowed = ui.configlist('web', 'allow-archive', untrusted=True) archives = [] for typ, spec in archivespecs.iteritems(): @@ -206,8 +207,8 @@ def _siblings(siblings=None, hiderev=Non return templateutil.mappinggenerator(_ctxsgen, args=(siblings,)) def difffeatureopts(req, ui, section): - diffopts = patch.difffeatureopts(ui, untrusted=True, - section=section, whitespace=True) + diffopts = diffutil.difffeatureopts(ui, untrusted=True, + section=section, whitespace=True) for k in ('ignorews', 'ignorewsamount', 'ignorewseol', 'ignoreblanklines'): v = req.qsparams.get(k) @@ -234,14 +235,14 @@ def children(ctx, hide=None): def renamelink(fctx): r = fctx.renamed() if r: - return [{'file': r[0], 'node': hex(r[1])}] - return [] + return templateutil.mappinglist([{'file': r[0], 'node': hex(r[1])}]) + return templateutil.mappinglist([]) def nodetagsdict(repo, node): - return [{"name": i} for i in repo.nodetags(node)] + return templateutil.hybridlist(repo.nodetags(node), name='name') def nodebookmarksdict(repo, node): - return [{"name": i} for i in repo.nodebookmarks(node)] + return templateutil.hybridlist(repo.nodebookmarks(node), name='name') def nodebranchdict(repo, ctx): branches = [] @@ -253,8 +254,8 @@ def nodebranchdict(repo, ctx): except error.RepoLookupError: branchnode = None if branchnode == ctx.node(): - branches.append({"name": branch}) - return branches + branches.append(branch) + return templateutil.hybridlist(branches, name='name') def nodeinbranch(repo, ctx): branches = [] @@ -264,29 +265,27 @@ def nodeinbranch(repo, ctx): except error.RepoLookupError: branchnode = None if branch != 'default' and branchnode != ctx.node(): - branches.append({"name": branch}) - return branches + branches.append(branch) + return templateutil.hybridlist(branches, name='name') def nodebranchnodefault(ctx): branches = [] branch = ctx.branch() if branch != 'default': - branches.append({"name": branch}) - return branches + branches.append(branch) + return templateutil.hybridlist(branches, name='name') + +def _nodenamesgen(context, f, node, name): + for t in f(node): + yield {name: t} -def showtag(repo, tmpl, t1, node=nullid, **args): - args = pycompat.byteskwargs(args) - for t in repo.nodetags(node): - lm = args.copy() - lm['tag'] = t - yield tmpl.generate(t1, lm) +def showtag(repo, t1, node=nullid): + args = (repo.nodetags, node, 'tag') + return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1) -def showbookmark(repo, tmpl, t1, node=nullid, **args): - args = pycompat.byteskwargs(args) - for t in repo.nodebookmarks(node): - lm = args.copy() - lm['bookmark'] = t - yield tmpl.generate(t1, lm) +def showbookmark(repo, t1, node=nullid): + args = (repo.nodebookmarks, node, 'bookmark') + return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1) def branchentries(repo, stripecount, limit=0): tips = [] @@ -294,7 +293,7 @@ def branchentries(repo, stripecount, lim parity = paritygen(stripecount) sortkey = lambda item: (not item[1], item[0].rev()) - def entries(**map): + def entries(context): count = 0 if not tips: for tag, hs, tip, closed in repo.branchmap().iterbranches(): @@ -317,7 +316,7 @@ def branchentries(repo, stripecount, lim 'date': ctx.date() } - return entries + return templateutil.mappinggenerator(entries) def cleanpath(repo, path): path = path.lstrip('/') @@ -380,7 +379,7 @@ 
def linerange(req): def formatlinerange(fromline, toline): return '%d:%d' % (fromline + 1, toline) -def succsandmarkers(context, mapping): +def _succsandmarkersgen(context, mapping): repo = context.resource(mapping, 'repo') itemmappings = templatekw.showsuccsandmarkers(context, mapping) for item in itemmappings.tovalue(context, mapping): @@ -388,10 +387,13 @@ def succsandmarkers(context, mapping): for successor in item['successors']) yield item +def succsandmarkers(context, mapping): + return templateutil.mappinggenerator(_succsandmarkersgen, args=(mapping,)) + # teach templater succsandmarkers is switched to (context, mapping) API succsandmarkers._requires = {'repo', 'ctx'} -def whyunstable(context, mapping): +def _whyunstablegen(context, mapping): repo = context.resource(mapping, 'repo') ctx = context.resource(mapping, 'ctx') @@ -401,6 +403,9 @@ def whyunstable(context, mapping): entry['divergentnodes'] = _siblings(entry['divergentnodes']) yield entry +def whyunstable(context, mapping): + return templateutil.mappinggenerator(_whyunstablegen, args=(mapping,)) + whyunstable._requires = {'repo', 'ctx'} def commonentry(repo, ctx): @@ -419,7 +424,8 @@ def commonentry(repo, ctx): 'phase': ctx.phasestr(), 'obsolete': ctx.obsolete(), 'succsandmarkers': succsandmarkers, - 'instabilities': [{"instability": i} for i in ctx.instabilities()], + 'instabilities': templateutil.hybridlist(ctx.instabilities(), + name='instability'), 'whyunstable': whyunstable, 'branch': nodebranchnodefault(ctx), 'inbranch': nodeinbranch(repo, ctx), @@ -439,8 +445,8 @@ def changelistentry(web, ctx): repo = web.repo rev = ctx.rev() n = ctx.node() - showtags = showtag(repo, web.tmpl, 'changelogtag', n) - files = listfilediffs(web.tmpl, ctx.files(), n, web.maxfiles) + showtags = showtag(repo, 'changelogtag', n) + files = listfilediffs(ctx.files(), n, web.maxfiles) entry = commonentry(repo, ctx) entry.update( @@ -452,30 +458,45 @@ def changelistentry(web, ctx): ) return entry +def changelistentries(web, revs, maxcount, parityfn): + """Emit up to N records for an iterable of revisions.""" + repo = web.repo + + count = 0 + for rev in revs: + if count >= maxcount: + break + + count += 1 + + entry = changelistentry(web, repo[rev]) + entry['parity'] = next(parityfn) + + yield entry + def symrevorshortnode(req, ctx): if 'node' in req.qsparams: return templatefilters.revescape(req.qsparams['node']) else: return short(ctx.node()) -def changesetentry(web, ctx): - '''Obtain a dictionary to be used to render the "changeset" template.''' - - showtags = showtag(web.repo, web.tmpl, 'changesettag', ctx.node()) - showbookmarks = showbookmark(web.repo, web.tmpl, 'changesetbookmark', - ctx.node()) - showbranch = nodebranchnodefault(ctx) - - files = [] - parity = paritygen(web.stripecount) +def _listfilesgen(context, ctx, stripecount): + parity = paritygen(stripecount) for blockno, f in enumerate(ctx.files()): template = 'filenodelink' if f in ctx else 'filenolink' - files.append(web.tmpl.generate(template, { + yield context.process(template, { 'node': ctx.hex(), 'file': f, 'blockno': blockno + 1, 'parity': next(parity), - })) + }) + +def changesetentry(web, ctx): + '''Obtain a dictionary to be used to render the "changeset" template.''' + + showtags = showtag(web.repo, 'changesettag', ctx.node()) + showbookmarks = showbookmark(web.repo, 'changesetbookmark', ctx.node()) + showbranch = nodebranchnodefault(ctx) basectx = basechangectx(web.repo, web.req) if basectx is None: @@ -488,8 +509,8 @@ def changesetentry(web, ctx): diff = diffs(web, ctx, 
basectx, None, style) parity = paritygen(web.stripecount) - diffstatsgen = diffstatgen(ctx, basectx) - diffstats = diffstat(web.tmpl, ctx, diffstatsgen, parity) + diffstatsgen = diffstatgen(web.repo.ui, ctx, basectx) + diffstats = diffstat(ctx, diffstatsgen, parity) return dict( diff=diff, @@ -498,40 +519,43 @@ def changesetentry(web, ctx): changesettag=showtags, changesetbookmark=showbookmarks, changesetbranch=showbranch, - files=files, + files=templateutil.mappedgenerator(_listfilesgen, + args=(ctx, web.stripecount)), diffsummary=lambda **x: diffsummary(diffstatsgen), diffstat=diffstats, archives=web.archivelist(ctx.hex()), **pycompat.strkwargs(commonentry(web.repo, ctx))) -def listfilediffs(tmpl, files, node, max): +def _listfilediffsgen(context, files, node, max): for f in files[:max]: - yield tmpl.generate('filedifflink', {'node': hex(node), 'file': f}) + yield context.process('filedifflink', {'node': hex(node), 'file': f}) if len(files) > max: - yield tmpl.generate('fileellipses', {}) + yield context.process('fileellipses', {}) -def diffs(web, ctx, basectx, files, style, linerange=None, - lineidprefix=''): +def listfilediffs(files, node, max): + return templateutil.mappedgenerator(_listfilediffsgen, + args=(files, node, max)) - def prettyprintlines(lines, blockno): - for lineno, l in enumerate(lines, 1): - difflineno = "%d.%d" % (blockno, lineno) - if l.startswith('+'): - ltype = "difflineplus" - elif l.startswith('-'): - ltype = "difflineminus" - elif l.startswith('@'): - ltype = "difflineat" - else: - ltype = "diffline" - yield web.tmpl.generate(ltype, { - 'line': l, - 'lineno': lineno, - 'lineid': lineidprefix + "l%s" % difflineno, - 'linenumber': "% 8s" % difflineno, - }) +def _prettyprintdifflines(context, lines, blockno, lineidprefix): + for lineno, l in enumerate(lines, 1): + difflineno = "%d.%d" % (blockno, lineno) + if l.startswith('+'): + ltype = "difflineplus" + elif l.startswith('-'): + ltype = "difflineminus" + elif l.startswith('@'): + ltype = "difflineat" + else: + ltype = "diffline" + yield context.process(ltype, { + 'line': l, + 'lineno': lineno, + 'lineid': lineidprefix + "l%s" % difflineno, + 'linenumber': "% 8s" % difflineno, + }) - repo = web.repo +def _diffsgen(context, repo, ctx, basectx, files, style, stripecount, + linerange, lineidprefix): if files: m = match.exact(repo.root, repo.getcwd(), files) else: @@ -540,7 +564,7 @@ def diffs(web, ctx, basectx, files, styl diffopts = patch.diffopts(repo.ui, untrusted=True) node1 = basectx.node() node2 = ctx.node() - parity = paritygen(web.stripecount) + parity = paritygen(stripecount) diffhunks = patch.diffhunks(repo, node1, node2, m, opts=diffopts) for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1): @@ -554,70 +578,89 @@ def diffs(web, ctx, basectx, files, styl continue lines.extend(hunklines) if lines: - yield web.tmpl.generate('diffblock', { + l = templateutil.mappedgenerator(_prettyprintdifflines, + args=(lines, blockno, + lineidprefix)) + yield { 'parity': next(parity), 'blockno': blockno, - 'lines': prettyprintlines(lines, blockno), - }) + 'lines': l, + } -def compare(tmpl, context, leftlines, rightlines): - '''Generator function that provides side-by-side comparison data.''' +def diffs(web, ctx, basectx, files, style, linerange=None, lineidprefix=''): + args = (web.repo, ctx, basectx, files, style, web.stripecount, + linerange, lineidprefix) + return templateutil.mappinggenerator(_diffsgen, args=args, name='diffblock') - def compline(type, leftlineno, leftline, rightlineno, rightline): - 
lineid = leftlineno and ("l%d" % leftlineno) or '' - lineid += rightlineno and ("r%d" % rightlineno) or '' - llno = '%d' % leftlineno if leftlineno else '' - rlno = '%d' % rightlineno if rightlineno else '' - return tmpl.generate('comparisonline', { - 'type': type, - 'lineid': lineid, - 'leftlineno': leftlineno, - 'leftlinenumber': "% 6s" % llno, - 'leftline': leftline or '', - 'rightlineno': rightlineno, - 'rightlinenumber': "% 6s" % rlno, - 'rightline': rightline or '', - }) +def _compline(type, leftlineno, leftline, rightlineno, rightline): + lineid = leftlineno and ("l%d" % leftlineno) or '' + lineid += rightlineno and ("r%d" % rightlineno) or '' + llno = '%d' % leftlineno if leftlineno else '' + rlno = '%d' % rightlineno if rightlineno else '' + return { + 'type': type, + 'lineid': lineid, + 'leftlineno': leftlineno, + 'leftlinenumber': "% 6s" % llno, + 'leftline': leftline or '', + 'rightlineno': rightlineno, + 'rightlinenumber': "% 6s" % rlno, + 'rightline': rightline or '', + } - def getblock(opcodes): - for type, llo, lhi, rlo, rhi in opcodes: - len1 = lhi - llo - len2 = rhi - rlo - count = min(len1, len2) - for i in xrange(count): - yield compline(type=type, - leftlineno=llo + i + 1, - leftline=leftlines[llo + i], - rightlineno=rlo + i + 1, - rightline=rightlines[rlo + i]) - if len1 > len2: - for i in xrange(llo + count, lhi): - yield compline(type=type, - leftlineno=i + 1, - leftline=leftlines[i], - rightlineno=None, - rightline=None) - elif len2 > len1: - for i in xrange(rlo + count, rhi): - yield compline(type=type, - leftlineno=None, - leftline=None, - rightlineno=i + 1, - rightline=rightlines[i]) +def _getcompblockgen(context, leftlines, rightlines, opcodes): + for type, llo, lhi, rlo, rhi in opcodes: + len1 = lhi - llo + len2 = rhi - rlo + count = min(len1, len2) + for i in xrange(count): + yield _compline(type=type, + leftlineno=llo + i + 1, + leftline=leftlines[llo + i], + rightlineno=rlo + i + 1, + rightline=rightlines[rlo + i]) + if len1 > len2: + for i in xrange(llo + count, lhi): + yield _compline(type=type, + leftlineno=i + 1, + leftline=leftlines[i], + rightlineno=None, + rightline=None) + elif len2 > len1: + for i in xrange(rlo + count, rhi): + yield _compline(type=type, + leftlineno=None, + leftline=None, + rightlineno=i + 1, + rightline=rightlines[i]) +def _getcompblock(leftlines, rightlines, opcodes): + args = (leftlines, rightlines, opcodes) + return templateutil.mappinggenerator(_getcompblockgen, args=args, + name='comparisonline') + +def _comparegen(context, contextnum, leftlines, rightlines): + '''Generator function that provides side-by-side comparison data.''' s = difflib.SequenceMatcher(None, leftlines, rightlines) - if context < 0: - yield tmpl.generate('comparisonblock', - {'lines': getblock(s.get_opcodes())}) + if contextnum < 0: + l = _getcompblock(leftlines, rightlines, s.get_opcodes()) + yield {'lines': l} else: - for oc in s.get_grouped_opcodes(n=context): - yield tmpl.generate('comparisonblock', {'lines': getblock(oc)}) + for oc in s.get_grouped_opcodes(n=contextnum): + l = _getcompblock(leftlines, rightlines, oc) + yield {'lines': l} -def diffstatgen(ctx, basectx): +def compare(contextnum, leftlines, rightlines): + args = (contextnum, leftlines, rightlines) + return templateutil.mappinggenerator(_comparegen, args=args, + name='comparisonblock') + +def diffstatgen(ui, ctx, basectx): '''Generator function that provides the diffstat data.''' + diffopts = patch.diffopts(ui, {'noprefix': False}) stats = patch.diffstatdata( - 
util.iterlines(ctx.diff(basectx, noprefix=False))) + util.iterlines(ctx.diff(basectx, opts=diffopts))) maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats) while True: yield stats, maxname, maxtotal, addtotal, removetotal, binary @@ -629,9 +672,7 @@ def diffsummary(statgen): return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % ( len(stats), addtotal, removetotal) -def diffstat(tmpl, ctx, statgen, parity): - '''Return a diffstat template for each file in the diff.''' - +def _diffstattmplgen(context, ctx, statgen, parity): stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen) files = ctx.files() @@ -645,7 +686,7 @@ def diffstat(tmpl, ctx, statgen, parity) template = 'diffstatlink' if filename in files else 'diffstatnolink' total = adds + removes fileno += 1 - yield tmpl.generate(template, { + yield context.process(template, { 'node': ctx.hex(), 'file': filename, 'fileno': fileno, @@ -655,6 +696,11 @@ def diffstat(tmpl, ctx, statgen, parity) 'parity': next(parity), }) +def diffstat(ctx, statgen, parity): + '''Return a diffstat template for each file in the diff.''' + args = (ctx, statgen, parity) + return templateutil.mappedgenerator(_diffstattmplgen, args=args) + class sessionvars(templateutil.wrapped): def __init__(self, vars, start='?'): self._start = start @@ -669,6 +715,24 @@ class sessionvars(templateutil.wrapped): def __copy__(self): return sessionvars(copy.copy(self._vars), self._start) + def contains(self, context, mapping, item): + item = templateutil.unwrapvalue(context, mapping, item) + return item in self._vars + + def getmember(self, context, mapping, key): + key = templateutil.unwrapvalue(context, mapping, key) + return self._vars.get(key) + + def getmin(self, context, mapping): + raise error.ParseError(_('not comparable')) + + def getmax(self, context, mapping): + raise error.ParseError(_('not comparable')) + + def filter(self, context, mapping, select): + # implement if necessary + raise error.ParseError(_('not filterable')) + def itermaps(self, context): separator = self._start for key, value in sorted(self._vars.iteritems()): @@ -685,6 +749,9 @@ class sessionvars(templateutil.wrapped): def show(self, context, mapping): return self.join(context, '') + def tobool(self, context, mapping): + return bool(self._vars) + def tovalue(self, context, mapping): return self._vars @@ -701,7 +768,7 @@ def getwebsubs(repo): for key, pattern in websubdefs: # grab the delimiter from the character after the "s" unesc = pattern[1:2] - delim = re.escape(unesc) + delim = stringutil.reescape(unesc) # identify portions of the pattern, taking care to avoid escaped # delimiters. the replace format and flags are optional, but @@ -733,3 +800,7 @@ def getwebsubs(repo): repo.ui.warn(_("websub: invalid regexp for %s: %s\n") % (key, regexp)) return websubtable + +def getgraphnode(repo, ctx): + return (templatekw.getgraphnodecurrent(repo, ctx) + + templatekw.getgraphnodesymbol(ctx)) diff --git a/mercurial/hook.py b/mercurial/hook.py --- a/mercurial/hook.py +++ b/mercurial/hook.py @@ -24,7 +24,7 @@ from .utils import ( stringutil, ) -def _pythonhook(ui, repo, htype, hname, funcname, args, throw): +def pythonhook(ui, repo, htype, hname, funcname, args, throw): '''call python hook. hook is callable object, looked up as name in python module. if callable returns "true", hook fails, else passes. 
if hook raises exception, treated as @@ -120,8 +120,6 @@ def _pythonhook(ui, repo, htype, hname, return r, False def _exthook(ui, repo, htype, name, cmd, args, throw): - ui.note(_("running hook %s: %s\n") % (name, cmd)) - starttime = util.timer() env = {} @@ -138,9 +136,17 @@ def _exthook(ui, repo, htype, name, cmd, if callable(v): v = v() if isinstance(v, (dict, list)): - v = stringutil.pprint(v, bprefix=False) + v = stringutil.pprint(v) env['HG_' + k.upper()] = v + if ui.configbool('hooks', 'tonative.%s' % name, False): + oldcmd = cmd + cmd = procutil.shelltonative(cmd, env) + if cmd != oldcmd: + ui.note(_('converting hook "%s" to native\n') % name) + + ui.note(_("running hook %s: %s\n") % (name, cmd)) + if repo: cwd = repo.root else: @@ -179,9 +185,11 @@ def _hookitems(ui, _untrusted=False): """return all hooks items ready to be sorted""" hooks = {} for name, cmd in ui.configitems('hooks', untrusted=_untrusted): - if not name.startswith('priority'): - priority = ui.configint('hooks', 'priority.%s' % name, 0) - hooks[name] = (-priority, len(hooks), name, cmd) + if name.startswith('priority.') or name.startswith('tonative.'): + continue + + priority = ui.configint('hooks', 'priority.%s' % name, 0) + hooks[name] = (-priority, len(hooks), name, cmd) return hooks _redirect = False @@ -242,7 +250,7 @@ def runhooks(ui, repo, htype, hooks, thr r = 1 raised = False elif callable(cmd): - r, raised = _pythonhook(ui, repo, htype, hname, cmd, args, + r, raised = pythonhook(ui, repo, htype, hname, cmd, args, throw) elif cmd.startswith('python:'): if cmd.count(':') >= 2: @@ -258,7 +266,7 @@ def runhooks(ui, repo, htype, hooks, thr hookfn = getattr(mod, cmd) else: hookfn = cmd[7:].strip() - r, raised = _pythonhook(ui, repo, htype, hname, hookfn, args, + r, raised = pythonhook(ui, repo, htype, hname, hookfn, args, throw) else: r = _exthook(ui, repo, htype, hname, cmd, args, throw) diff --git a/mercurial/httpconnection.py b/mercurial/httpconnection.py --- a/mercurial/httpconnection.py +++ b/mercurial/httpconnection.py @@ -38,21 +38,21 @@ class httpsendfile(object): self.write = self._data.write self.length = os.fstat(self._data.fileno()).st_size self._pos = 0 - self._total = self.length // 1024 * 2 - - def read(self, *args, **kwargs): - ret = self._data.read(*args, **kwargs) - if not ret: - self.ui.progress(_('sending'), None) - return ret - self._pos += len(ret) # We pass double the max for total because we currently have # to send the bundle twice in the case of a server that # requires authentication. Since we can't know until we try # once whether authentication will be required, just lie to # the user and maybe the push succeeds suddenly at 50%. 
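# The _hookitems() hunk above reserves "priority.<name>" and
# "tonative.<name>" entries in the [hooks] section as per-hook modifiers
# rather than hooks of their own, so enumeration has to skip them. A
# stand-alone sketch of that pattern (plain Python, not the mercurial
# implementation; configitems and getpriority are hypothetical stand-ins):
def hookitems(configitems, getpriority):
    hooks = {}
    for name, cmd in configitems:
        if name.startswith('priority.') or name.startswith('tonative.'):
            continue  # modifier keys, not runnable hooks
        # sort key: higher priority first, then definition order
        hooks[name] = (-getpriority(name), len(hooks), name, cmd)
    return hooks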
- self.ui.progress(_('sending'), self._pos // 1024, - unit=_('kb'), total=self._total) + self._progress = ui.makeprogress(_('sending'), unit=_('kb'), + total=(self.length // 1024 * 2)) + + def read(self, *args, **kwargs): + ret = self._data.read(*args, **kwargs) + if not ret: + self._progress.complete() + return ret + self._pos += len(ret) + self._progress.update(self._pos // 1024) return ret def __enter__(self): diff --git a/mercurial/httppeer.py b/mercurial/httppeer.py --- a/mercurial/httppeer.py +++ b/mercurial/httppeer.py @@ -13,7 +13,6 @@ import io import os import socket import struct -import tempfile import weakref from .i18n import _ @@ -307,6 +306,7 @@ def sendrequest(ui, opener, req): start = util.timer() + res = None try: res = opener.open(req) except urlerr.httperror as inst: @@ -320,8 +320,9 @@ def sendrequest(ui, opener, req): raise IOError(None, inst) finally: if ui.debugflag and ui.configbool('devel', 'debug.peer-request'): + code = res.code if res else -1 dbg(line % ' finished in %.4f seconds (%d)' - % (util.timer() - start, res.code)) + % (util.timer() - start, code)) # Insert error handlers for common I/O failures. _wraphttpresponse(res) @@ -519,7 +520,7 @@ class httppeer(wireprotov1peer.wirepeer) filename = None try: # dump bundle to disk - fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg") + fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg") fh = os.fdopen(fd, r"wb") d = fp.read(4096) while d: diff --git a/mercurial/i18n.py b/mercurial/i18n.py --- a/mercurial/i18n.py +++ b/mercurial/i18n.py @@ -23,11 +23,6 @@ if getattr(sys, 'frozen', None) is not N else: module = pycompat.fsencode(__file__) -try: - unicode -except NameError: - unicode = str - _languages = None if (pycompat.iswindows and 'LANGUAGE' not in encoding.environ @@ -76,7 +71,7 @@ def gettext(message): cache = _msgcache.setdefault(encoding.encoding, {}) if message not in cache: - if type(message) is unicode: + if type(message) is pycompat.unicode: # goofy unicode docstrings in test paragraphs = message.split(u'\n\n') else: diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py --- a/mercurial/localrepo.py +++ b/mercurial/localrepo.py @@ -354,6 +354,15 @@ class locallegacypeer(localpeer): # clients. REVLOGV2_REQUIREMENT = 'exp-revlogv2.0' +# A repository with the sparserevlog feature will have delta chains that +# can spread over a larger span. Sparse reading cuts these large spans into +# pieces, so that each piece isn't too big. +# Without the sparserevlog capability, reading from the repository could use +# huge amounts of memory, because the whole span would be read at once, +# including all the intermediate revisions that aren't pertinent for the chain. +# This is why once a repository has enabled sparse-read, it becomes required. +SPARSEREVLOG_REQUIREMENT = 'sparserevlog' + # Functions receiving (ui, features) that extensions can register to impact # the ability to load repositories with custom requirements. Only # functions defined in loaded extensions are called. 
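# The SPARSEREVLOG_REQUIREMENT constant above follows the usual
# requirements pattern: once the entry is written to .hg/requires, a
# client that does not recognize it must refuse to open the repository
# instead of misreading the sparse delta chains. A stand-alone sketch of
# that gate (illustrative only, not the localrepo implementation):
def checkrequirements(supported, requirements):
    missing = requirements - supported
    if missing:
        raise RuntimeError('repository requires features unknown to this '
                           'client: %s' % ', '.join(sorted(missing)))

# checkrequirements({'revlogv1', 'store'}, {'store', 'sparserevlog'})
# raises, while a sparserevlog-aware client opens the repo normally.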
@@ -376,6 +385,7 @@ class localrepository(object): 'generaldelta', 'treemanifest', REVLOGV2_REQUIREMENT, + SPARSEREVLOG_REQUIREMENT, } _basesupported = supportedformats | { 'store', @@ -658,10 +668,9 @@ class localrepository(object): manifestcachesize = self.ui.configint('format', 'manifestcachesize') if manifestcachesize is not None: self.svfs.options['manifestcachesize'] = manifestcachesize - # experimental config: format.aggressivemergedeltas - aggressivemergedeltas = self.ui.configbool('format', - 'aggressivemergedeltas') - self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas + deltabothparents = self.ui.configbool('revlog', + 'optimize-delta-parent-choice') + self.svfs.options['deltabothparents'] = deltabothparents self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui) chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan') if 0 <= chainspan: @@ -678,6 +687,8 @@ class localrepository(object): self.svfs.options['with-sparse-read'] = withsparseread self.svfs.options['sparse-read-density-threshold'] = srdensitythres self.svfs.options['sparse-read-min-gap-size'] = srmingapsize + sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements + self.svfs.options['sparse-revlog'] = sparserevlog for r in self.requirements: if r.startswith('exp-compression-'): @@ -778,6 +789,10 @@ class localrepository(object): @repofilecache('dirstate') def dirstate(self): + return self._makedirstate() + + def _makedirstate(self): + """Extension point for wrapping the dirstate per-repo.""" sparsematchfn = lambda: sparse.matcher(self) return dirstate.dirstate(self.vfs, self.ui, self.root, @@ -1029,11 +1044,7 @@ class localrepository(object): def nodebookmarks(self, node): """return the list of bookmarks pointing to the specified node""" - marks = [] - for bookmark, n in self._bookmarks.iteritems(): - if n == node: - marks.append(bookmark) - return sorted(marks) + return self._bookmarks.names(node) def branchmap(self): '''returns a dictionary {branch: [branchheads]} with branchheads @@ -2370,6 +2381,9 @@ def newreporequirements(repo): requirements.add('generaldelta') if ui.configbool('experimental', 'treemanifest'): requirements.add('treemanifest') + # experimental config: format.sparse-revlog + if ui.configbool('format', 'sparse-revlog'): + requirements.add(SPARSEREVLOG_REQUIREMENT) revlogv2 = ui.config('experimental', 'revlogv2') if revlogv2 == 'enable-unstable-format-and-corrupt-my-data': diff --git a/mercurial/lock.py b/mercurial/lock.py --- a/mercurial/lock.py +++ b/mercurial/lock.py @@ -209,7 +209,7 @@ class lock(object): def __del__(self): if self.held: - warnings.warn("use lock.release instead of del lock", + warnings.warn(r"use lock.release instead of del lock", category=DeprecationWarning, stacklevel=2) diff --git a/mercurial/logcmdutil.py b/mercurial/logcmdutil.py --- a/mercurial/logcmdutil.py +++ b/mercurial/logcmdutil.py @@ -76,9 +76,9 @@ def diffordiffstat(ui, repo, diffopts, n if not ui.plain(): width = ui.termwidth() - chunks = patch.diff(repo, node1, node2, match, changes, opts=diffopts, - prefix=prefix, relroot=relroot, - hunksfilterfn=hunksfilterfn) + chunks = repo[node2].diff(repo[node1], match, changes, opts=diffopts, + prefix=prefix, relroot=relroot, + hunksfilterfn=hunksfilterfn) if fp is not None or ui.canwritewithoutlabels(): out = fp or ui @@ -154,7 +154,9 @@ class changesetprinter(object): self.repo = repo self.buffered = buffered self._differ = differ or changesetdiffer() - self.diffopts = diffopts or {} + self._diffopts = 
patch.diffallopts(ui, diffopts) + self._includestat = diffopts and diffopts.get('stat') + self._includediff = diffopts and diffopts.get('patch') self.header = {} self.hunk = {} self.lastheader = None @@ -226,7 +228,7 @@ class changesetprinter(object): if self.ui.debugflag and rev is not None: mnode = ctx.manifestnode() - mrev = self.repo.manifestlog._revlog.rev(mnode) + mrev = self.repo.manifestlog.rev(mnode) self.ui.write(columns['manifest'] % scmutil.formatrevnode(self.ui, mrev, mnode), label='ui.debug log.manifest') @@ -298,16 +300,13 @@ class changesetprinter(object): ''' def _showpatch(self, ctx): - stat = self.diffopts.get('stat') - diff = self.diffopts.get('patch') - diffopts = patch.diffallopts(self.ui, self.diffopts) - if stat: - self._differ.showdiff(self.ui, ctx, diffopts, stat=True) - if stat and diff: + if self._includestat: + self._differ.showdiff(self.ui, ctx, self._diffopts, stat=True) + if self._includestat and self._includediff: self.ui.write("\n") - if diff: - self._differ.showdiff(self.ui, ctx, diffopts, stat=False) - if stat or diff: + if self._includediff: + self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False) + if self._includestat or self._includediff: self.ui.write("\n") class changesetformatter(changesetprinter): @@ -316,6 +315,7 @@ class changesetformatter(changesetprinte def __init__(self, ui, repo, fm, differ=None, diffopts=None, buffered=False): changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered) + self._diffopts = patch.difffeatureopts(ui, diffopts, git=True) self._fm = fm def close(self): @@ -367,16 +367,13 @@ class changesetformatter(changesetprinte fm.data(copies=fm.formatdict(copies, key='name', value='source')) - stat = self.diffopts.get('stat') - diff = self.diffopts.get('patch') - diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True) - if stat: + if self._includestat: self.ui.pushbuffer() - self._differ.showdiff(self.ui, ctx, diffopts, stat=True) + self._differ.showdiff(self.ui, ctx, self._diffopts, stat=True) fm.data(diffstat=self.ui.popbuffer()) - if diff: + if self._includediff: self.ui.pushbuffer() - self._differ.showdiff(self.ui, ctx, diffopts, stat=False) + self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False) fm.data(diff=self.ui.popbuffer()) class changesettemplater(changesetprinter): @@ -868,7 +865,7 @@ def displaygraph(ui, repo, dag, displaye for fn in ctx.files(): rename = getrenamed(fn, ctx.rev()) if rename: - copies.append((fn, rename[0])) + copies.append((fn, rename)) edges = edgefn(type, char, state, rev, parents) firstedge = next(edges) width = firstedge[2] @@ -896,7 +893,7 @@ def displayrevs(ui, repo, revs, displaye for fn in ctx.files(): rename = getrenamed(fn, rev) if rename: - copies.append((fn, rename[0])) + copies.append((fn, rename)) displayer.show(ctx, copies=copies) displayer.flush(ctx) displayer.close() diff --git a/mercurial/logexchange.py b/mercurial/logexchange.py --- a/mercurial/logexchange.py +++ b/mercurial/logexchange.py @@ -112,8 +112,8 @@ def activepath(repo, remote): # represent the remotepath with user defined path name if exists for path, url in repo.ui.configitems('paths'): # remove auth info from user defined url - url = util.removeauth(url) - if url == rpath: + noauthurl = util.removeauth(url) + if url == rpath or noauthurl == rpath: rpath = path break diff --git a/mercurial/mail.py b/mercurial/mail.py --- a/mercurial/mail.py +++ b/mercurial/mail.py @@ -11,6 +11,8 @@ import email import email.charset import email.header import email.message +import email.parser 
+import io import os import smtplib import socket @@ -322,6 +324,23 @@ def mimeencode(ui, s, charsets=None, dis s, cs = _encode(ui, s, charsets) return mimetextqp(s, 'plain', cs) +if pycompat.ispy3: + def parse(fp): + ep = email.parser.Parser() + # disable the "universal newlines" mode, which isn't binary safe. + # I have no idea if ascii/surrogateescape is correct, but that's + # what the standard Python email parser does. + fp = io.TextIOWrapper(fp, encoding=r'ascii', + errors=r'surrogateescape', newline=chr(10)) + try: + return ep.parse(fp) + finally: + fp.detach() +else: + def parse(fp): + ep = email.parser.Parser() + return ep.parse(fp) + def headdecode(s): '''Decodes RFC-2047 header''' uparts = [] diff --git a/mercurial/manifest.py b/mercurial/manifest.py --- a/mercurial/manifest.py +++ b/mercurial/manifest.py @@ -20,9 +20,14 @@ from . import ( error, mdiff, policy, + pycompat, + repository, revlog, util, ) +from .utils import ( + interfaceutil, +) parsers = policy.importmod(r'parsers') propertycache = util.propertycache @@ -362,6 +367,7 @@ try: except AttributeError: pass +@interfaceutil.implementer(repository.imanifestdict) class manifestdict(object): def __init__(self, data=''): self._lm = _lazymanifest(data) @@ -528,7 +534,8 @@ class manifestdict(object): def items(self): return (x[:2] for x in self._lm.iterentries()) - iteritems = items + def iteritems(self): + return (x[:2] for x in self._lm.iterentries()) def iterentries(self): return self._lm.iterentries() @@ -635,7 +642,8 @@ def _checkforbidden(l): for f in l: if '\n' in f or '\r' in f: raise error.RevlogError( - _("'\\n' and '\\r' disallowed in filenames: %r") % f) + _("'\\n' and '\\r' disallowed in filenames: %r") + % pycompat.bytestr(f)) # apply the changes collected during the bisect loop to our addlist @@ -1260,6 +1268,7 @@ class manifestrevlog(revlog.revlog): m.setnode(n) return n +@interfaceutil.implementer(repository.imanifestlog) class manifestlog(object): """A collection class representing the collection of manifest snapshots referenced by commits in the repository. @@ -1285,7 +1294,7 @@ class manifestlog(object): self._dirmancache = {} self._dirmancache[''] = util.lrucachedict(cachesize) - self.cachesize = cachesize + self._cachesize = cachesize def __getitem__(self, node): """Retrieves the manifest instance for the given node. Throws a @@ -1331,7 +1340,7 @@ class manifestlog(object): if node != revlog.nullid: mancache = self._dirmancache.get(dir) if not mancache: - mancache = util.lrucachedict(self.cachesize) + mancache = util.lrucachedict(self._cachesize) self._dirmancache[dir] = mancache mancache[node] = m return m @@ -1340,6 +1349,13 @@ class manifestlog(object): self._dirmancache.clear() self._revlog.clearcaches() + def rev(self, node): + return self._revlog.rev(node) + + def addgroup(self, deltas, linkmapper, transaction): + return self._revlog.addgroup(deltas, linkmapper, transaction) + +@interfaceutil.implementer(repository.imanifestrevisionwritable) class memmanifestctx(object): def __init__(self, manifestlog): self._manifestlog = manifestlog @@ -1363,6 +1379,7 @@ class memmanifestctx(object): return self._revlog().add(self._manifestdict, transaction, link, p1, p2, added, removed) +@interfaceutil.implementer(repository.imanifestrevisionstored) class manifestctx(object): """A class representing a single revision of a manifest, including its contents, its parent revs, and its linkrev. 
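# mail.parse() above feeds the Python 3 email parser through a
# TextIOWrapper with the ascii/surrogateescape codec, so arbitrary bytes
# survive the round trip, and newline=chr(10) disables universal-newline
# translation, which is not binary safe. A runnable Python 3 sketch of
# the same technique (parsebytes is an illustrative name):
import email.parser
import io

def parsebytes(data):
    fp = io.TextIOWrapper(io.BytesIO(data), encoding=r'ascii',
                          errors=r'surrogateescape', newline='\n')
    try:
        return email.parser.Parser().parse(fp)
    finally:
        fp.detach()  # unwrap without closing the underlying buffer

# parsebytes(b'Subject: caf\xc3\xa9\n\nbody\n')['Subject'] keeps the raw
# bytes as surrogate escapes until they are encoded back.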
@@ -1439,6 +1456,7 @@ class manifestctx(object): def find(self, key): return self.read().find(key) +@interfaceutil.implementer(repository.imanifestrevisionwritable) class memtreemanifestctx(object): def __init__(self, manifestlog, dir=''): self._manifestlog = manifestlog @@ -1465,6 +1483,7 @@ class memtreemanifestctx(object): return self._revlog().add(self._treemanifest, transaction, link, p1, p2, added, removed, readtree=readtree) +@interfaceutil.implementer(repository.imanifestrevisionstored) class treemanifestctx(object): def __init__(self, manifestlog, dir, node): self._manifestlog = manifestlog diff --git a/mercurial/match.py b/mercurial/match.py --- a/mercurial/match.py +++ b/mercurial/match.py @@ -40,9 +40,9 @@ def _rematcher(regex): except AttributeError: return m.match -def _expandsets(kindpats, ctx, listsubrepos): - '''Returns the kindpats list with the 'set' patterns expanded.''' - fset = set() +def _expandsets(root, cwd, kindpats, ctx, listsubrepos, badfn): + '''Returns the kindpats list with the 'set' patterns expanded to matchers''' + matchers = [] other = [] for kind, pat, source in kindpats: @@ -50,17 +50,17 @@ def _expandsets(kindpats, ctx, listsubre if not ctx: raise error.ProgrammingError("fileset expression with no " "context") - s = ctx.getfileset(pat) - fset.update(s) + matchers.append(ctx.matchfileset(pat, badfn=badfn)) if listsubrepos: for subpath in ctx.substate: - s = ctx.sub(subpath).getfileset(pat) - fset.update(subpath + '/' + f for f in s) + sm = ctx.sub(subpath).matchfileset(pat, badfn=badfn) + pm = prefixdirmatcher(root, cwd, subpath, sm, badfn=badfn) + matchers.append(pm) continue other.append((kind, pat, source)) - return fset, other + return matchers, other def _expandsubinclude(kindpats, root): '''Returns the list of subinclude matcher args and the kindpats without the @@ -95,6 +95,23 @@ def _kindpatsalwaysmatch(kindpats): return False return True +def _buildkindpatsmatcher(matchercls, root, cwd, kindpats, ctx=None, + listsubrepos=False, badfn=None): + matchers = [] + fms, kindpats = _expandsets(root, cwd, kindpats, ctx=ctx, + listsubrepos=listsubrepos, badfn=badfn) + if kindpats: + m = matchercls(root, cwd, kindpats, listsubrepos=listsubrepos, + badfn=badfn) + matchers.append(m) + if fms: + matchers.extend(fms) + if not matchers: + return nevermatcher(root, cwd, badfn=badfn) + if len(matchers) == 1: + return matchers[0] + return unionmatcher(matchers) + def match(root, cwd, patterns=None, include=None, exclude=None, default='glob', exact=False, auditor=None, ctx=None, listsubrepos=False, warn=None, badfn=None, icasefs=False): @@ -159,8 +176,9 @@ def match(root, cwd, patterns=None, incl if _kindpatsalwaysmatch(kindpats): m = alwaysmatcher(root, cwd, badfn, relativeuipath=True) else: - m = patternmatcher(root, cwd, kindpats, ctx=ctx, - listsubrepos=listsubrepos, badfn=badfn) + m = _buildkindpatsmatcher(patternmatcher, root, cwd, kindpats, + ctx=ctx, listsubrepos=listsubrepos, + badfn=badfn) else: # It's a little strange that no patterns means to match everything. # Consider changing this to match nothing (probably using nevermatcher). 
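# _buildkindpatsmatcher() above splits fileset expressions out of the
# kindpats, builds one matcher per expression via ctx.matchfileset(),
# and unions them with the ordinary pattern matcher, falling back to a
# never-matcher when nothing is left. The composition itself, as a
# stand-alone sketch over plain predicate functions:
def buildmatcher(matchers):
    if not matchers:
        return lambda f: False                    # nevermatcher
    if len(matchers) == 1:
        return matchers[0]
    return lambda f: any(m(f) for m in matchers)  # unionmatcher

isdoc = buildmatcher([lambda f: f.endswith('.txt'),
                      lambda f: f.startswith('docs/')])
assert isdoc('docs/a.py') and not isdoc('src/a.py')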
@@ -168,13 +186,13 @@ def match(root, cwd, patterns=None, incl if include: kindpats = normalize(include, 'glob', root, cwd, auditor, warn) - im = includematcher(root, cwd, kindpats, ctx=ctx, - listsubrepos=listsubrepos, badfn=None) + im = _buildkindpatsmatcher(includematcher, root, cwd, kindpats, ctx=ctx, + listsubrepos=listsubrepos, badfn=None) m = intersectmatchers(m, im) if exclude: kindpats = normalize(exclude, 'glob', root, cwd, auditor, warn) - em = includematcher(root, cwd, kindpats, ctx=ctx, - listsubrepos=listsubrepos, badfn=None) + em = _buildkindpatsmatcher(includematcher, root, cwd, kindpats, ctx=ctx, + listsubrepos=listsubrepos, badfn=None) m = differencematcher(m, em) return m @@ -375,15 +393,28 @@ class nevermatcher(basematcher): def __repr__(self): return r'<nevermatcher>' +class predicatematcher(basematcher): + """A matcher adapter for a simple boolean function""" + + def __init__(self, root, cwd, predfn, predrepr=None, badfn=None): + super(predicatematcher, self).__init__(root, cwd, badfn) + self.matchfn = predfn + self._predrepr = predrepr + + @encoding.strmethod + def __repr__(self): + s = (stringutil.buildrepr(self._predrepr) + or pycompat.byterepr(self.matchfn)) + return '<predicatematcher pred=%s>' % s + class patternmatcher(basematcher): - def __init__(self, root, cwd, kindpats, ctx=None, listsubrepos=False, - badfn=None): + def __init__(self, root, cwd, kindpats, listsubrepos=False, badfn=None): super(patternmatcher, self).__init__(root, cwd, badfn) self._files = _explicitfiles(kindpats) self._prefix = _prefix(kindpats) - self._pats, self.matchfn = _buildmatch(ctx, kindpats, '$', listsubrepos, + self._pats, self.matchfn = _buildmatch(kindpats, '$', listsubrepos, root) @propertycache @@ -404,15 +435,14 @@ class patternmatcher(basematcher): @encoding.strmethod def __repr__(self): - return ('<patternmatcher patterns=%r>' % self._pats) + return ('<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats)) class includematcher(basematcher): - def __init__(self, root, cwd, kindpats, ctx=None, listsubrepos=False, - badfn=None): + def __init__(self, root, cwd, kindpats, listsubrepos=False, badfn=None): super(includematcher, self).__init__(root, cwd, badfn) - self._pats, self.matchfn = _buildmatch(ctx, kindpats, '(?:/|$)', + self._pats, self.matchfn = _buildmatch(kindpats, '(?:/|$)', listsubrepos, root) self._prefix = _prefix(kindpats) roots, dirs = _rootsanddirs(kindpats) @@ -653,6 +683,78 @@ class subdirmatcher(basematcher): return ('<subdirmatcher path=%r, matcher=%r>' % (self._path, self._matcher)) +class prefixdirmatcher(basematcher): + """Adapt a matcher to work on a parent directory. + + The matcher's non-matching-attributes (root, cwd, bad, explicitdir, + traversedir) are ignored. + + The prefix path should usually be the relative path from the root of + this matcher to the root of the wrapped matcher.
+ + >>> m1 = match(b'root/d/e', b'f', [b'../a.txt', b'b.txt']) + >>> m2 = prefixdirmatcher(b'root', b'd/e/f', b'd/e', m1) + >>> bool(m2(b'a.txt'),) + False + >>> bool(m2(b'd/e/a.txt')) + True + >>> bool(m2(b'd/e/b.txt')) + False + >>> m2.files() + ['d/e/a.txt', 'd/e/f/b.txt'] + >>> m2.exact(b'd/e/a.txt') + True + >>> m2.visitdir(b'd') + True + >>> m2.visitdir(b'd/e') + True + >>> m2.visitdir(b'd/e/f') + True + >>> m2.visitdir(b'd/e/g') + False + >>> m2.visitdir(b'd/ef') + False + """ + + def __init__(self, root, cwd, path, matcher, badfn=None): + super(prefixdirmatcher, self).__init__(root, cwd, badfn) + if not path: + raise error.ProgrammingError('prefix path must not be empty') + self._path = path + self._pathprefix = path + '/' + self._matcher = matcher + + @propertycache + def _files(self): + return [self._pathprefix + f for f in self._matcher._files] + + def matchfn(self, f): + if not f.startswith(self._pathprefix): + return False + return self._matcher.matchfn(f[len(self._pathprefix):]) + + @propertycache + def _pathdirs(self): + return set(util.finddirs(self._path)) | {'.'} + + def visitdir(self, dir): + if dir == self._path: + return self._matcher.visitdir('.') + if dir.startswith(self._pathprefix): + return self._matcher.visitdir(dir[len(self._pathprefix):]) + return dir in self._pathdirs + + def isexact(self): + return self._matcher.isexact() + + def prefix(self): + return self._matcher.prefix() + + @encoding.strmethod + def __repr__(self): + return ('<prefixdirmatcher path=%r, matcher=%r>' + % (pycompat.bytestr(self._path), self._matcher)) + class unionmatcher(basematcher): """A matcher that is the union of several matchers. @@ -714,7 +816,7 @@ def _globre(pat): >>> bprint(_globre(br'**/a')) (?:.*/)?a >>> bprint(_globre(br'a/**/b')) - a\/(?:.*/)?b + a/(?:.*/)?b >>> bprint(_globre(br'[a*?!^][^b][!c]')) [a*?!^][\^b][^c] >>> bprint(_globre(br'{a,b}')) @@ -725,7 +827,7 @@ def _globre(pat): i, n = 0, len(pat) res = '' group = 0 - escape = util.re.escape + escape = util.stringutil.reescape def peek(): return i < n and pat[i:i + 1] while i < n: @@ -790,13 +892,13 @@ def _regex(kind, pat, globsuffix): if kind in ('path', 'relpath'): if pat == '.': return '' - return util.re.escape(pat) + '(?:/|$)' + return util.stringutil.reescape(pat) + '(?:/|$)' if kind == 'rootfilesin': if pat == '.': escaped = '' else: # Pattern is a directory name. - escaped = util.re.escape(pat) + '/' + escaped = util.stringutil.reescape(pat) + '/' # Anything after the pattern must be a non-directory. return escaped + '[^/]+$' if kind == 'relglob': @@ -805,9 +907,11 @@ def _regex(kind, pat, globsuffix): if pat.startswith('^'): return pat return '.*' + pat - return _globre(pat) + globsuffix + if kind == 'glob': + return _globre(pat) + globsuffix + raise error.ProgrammingError('not a regex pattern: %s:%s' % (kind, pat)) -def _buildmatch(ctx, kindpats, globsuffix, listsubrepos, root): +def _buildmatch(kindpats, globsuffix, listsubrepos, root): '''Return regexp string and a matcher function for kindpats.
globsuffix is appended to the regexp of globs.''' matchfuncs = [] @@ -828,10 +932,6 @@ def _buildmatch(ctx, kindpats, globsuffi return False matchfuncs.append(matchsubinclude) - fset, kindpats = _expandsets(kindpats, ctx, listsubrepos) - if fset: - matchfuncs.append(fset.__contains__) - regex = '' if kindpats: regex, mf = _buildregexmatch(kindpats, globsuffix) diff --git a/mercurial/merge.py b/mercurial/merge.py --- a/mercurial/merge.py +++ b/mercurial/merge.py @@ -903,6 +903,23 @@ def _forgetremoved(wctx, mctx, branchmer return actions def _checkcollision(repo, wmf, actions): + """ + Check for case-folding collisions. + """ + + # If the repo is narrowed, filter out files outside the narrowspec. + narrowmatch = repo.narrowmatch() + if not narrowmatch.always(): + wmf = wmf.matches(narrowmatch) + if actions: + narrowactions = {} + for m, actionsfortype in actions.iteritems(): + narrowactions[m] = [] + for (f, args, msg) in actionsfortype: + if narrowmatch(f): + narrowactions[m].append((f, args, msg)) + actions = narrowactions + # build provisional merged manifest up pmmf = set(wmf) @@ -1072,6 +1089,33 @@ def checkpathconflicts(repo, wctx, mctx, repo.ui.warn(_("%s: is both a file and a directory\n") % p) raise error.Abort(_("destination manifest contains path conflicts")) +def _filternarrowactions(narrowmatch, branchmerge, actions): + """ + Filters out actions that can be ignored because the repo is narrowed. + + Raise an exception if the merge cannot be completed because the repo is + narrowed. + """ + nooptypes = set(['k']) # TODO: handle with nonconflicttypes + nonconflicttypes = set('a am c cm f g r e'.split()) + # We mutate the items in the dict during iteration, so iterate + # over a copy. + for f, action in list(actions.items()): + if narrowmatch(f): + pass + elif not branchmerge: + del actions[f] # just updating, ignore changes outside clone + elif action[0] in nooptypes: + del actions[f] # merge does not affect file + elif action[0] in nonconflicttypes: + raise error.Abort(_('merge affects file \'%s\' outside narrow, ' + 'which is not yet supported') % f, + hint=_('merging in the other direction ' + 'may work')) + else: + raise error.Abort(_('conflict in file \'%s\' is outside ' + 'narrow clone') % f) + def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher, acceptremote, followcopies, forcefulldiff=False): """ @@ -1258,6 +1302,11 @@ def manifestmerge(repo, wctx, p2, pa, br # If we are merging, look for path conflicts. checkpathconflicts(repo, wctx, p2, actions) + narrowmatch = repo.narrowmatch() + if not narrowmatch.always(): + # Updates "actions" in place + _filternarrowactions(narrowmatch, branchmerge, actions) + return actions, diverge, renamedelete def _resolvetrivial(repo, wctx, mctx, ancestor, actions): @@ -1492,27 +1541,6 @@ class updateresult(object): return (not self.updatedcount and not self.mergedcount and not self.removedcount and not self.unresolvedcount) - # TODO remove container emulation once consumers switch to new API.
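# _filternarrowactions() above classifies merge actions that fall
# outside the narrowspec: plain updates and no-op merge actions are
# dropped, anything else aborts. The decision tree, as a stand-alone
# sketch that collapses the two abort messages into one (inside() and
# branchmerge stand in for narrowmatch() and the merge flag):
def filternarrow(actions, inside, branchmerge):
    nooptypes = {'k'}
    for f, action in list(actions.items()):  # we mutate while iterating
        if inside(f):
            continue
        if not branchmerge or action[0] in nooptypes:
            del actions[f]        # ignorable outside the narrow clone
        else:
            raise ValueError('merge affects %s outside narrow' % f)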
- - def __getitem__(self, x): - util.nouideprecwarn('access merge.update() results by name instead of ' - 'index', '4.6', 2) - if x == 0: - return self.updatedcount - elif x == 1: - return self.mergedcount - elif x == 2: - return self.removedcount - elif x == 3: - return self.unresolvedcount - else: - raise IndexError('can only access items 0-3') - - def __len__(self): - util.nouideprecwarn('access merge.update() results by name instead of ' - 'index', '4.6', 2) - return 4 - def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None): """apply the merge action list to the working directory @@ -1558,10 +1586,6 @@ def applyupdates(repo, actions, wctx, mc if f1 != f and move: moves.append(f1) - _updating = _('updating') - _files = _('files') - progress = repo.ui.progress - # remove renamed files after safely stored for f in moves: if wctx[f].lexists(): @@ -1571,7 +1595,8 @@ def applyupdates(repo, actions, wctx, mc numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP) - z = 0 + progress = repo.ui.makeprogress(_('updating'), unit=_('files'), + total=numupdates) if [a for a in actions[ACTION_REMOVE] if a[0] == '.hgsubstate']: subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) @@ -1588,8 +1613,7 @@ def applyupdates(repo, actions, wctx, mc s(_("the remote file has been renamed to %s\n") % f1) s(_("resolve manually then use 'hg resolve --mark %s'\n") % f) ms.addpath(f, f1, fo) - z += 1 - progress(_updating, z, item=f, total=numupdates, unit=_files) + progress.increment(item=f) # When merging in-memory, we can't support worker processes, so set the # per-item cost at 0 in that case. @@ -1599,8 +1623,7 @@ def applyupdates(repo, actions, wctx, mc prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]) for i, item in prog: - z += i - progress(_updating, z, item=item, total=numupdates, unit=_files) + progress.increment(step=i, item=item) removed = len(actions[ACTION_REMOVE]) # resolve path conflicts (must come before getting) @@ -1612,15 +1635,16 @@ def applyupdates(repo, actions, wctx, mc wctx[f].audit() wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags()) wctx[f0].remove() - z += 1 - progress(_updating, z, item=f, total=numupdates, unit=_files) + progress.increment(item=f) - # get in parallel + # get in parallel. 
+ threadsafe = repo.ui.configbool('experimental', + 'worker.wdir-get-thread-safe') prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx), - actions[ACTION_GET]) + actions[ACTION_GET], + threadsafe=threadsafe) for i, item in prog: - z += i - progress(_updating, z, item=item, total=numupdates, unit=_files) + progress.increment(step=i, item=item) updated = len(actions[ACTION_GET]) if [a for a in actions[ACTION_GET] if a[0] == '.hgsubstate']: @@ -1629,20 +1653,17 @@ def applyupdates(repo, actions, wctx, mc # forget (manifest only, just log it) (must come first) for f, args, msg in actions[ACTION_FORGET]: repo.ui.debug(" %s: %s -> f\n" % (f, msg)) - z += 1 - progress(_updating, z, item=f, total=numupdates, unit=_files) + progress.increment(item=f) # re-add (manifest only, just log it) for f, args, msg in actions[ACTION_ADD]: repo.ui.debug(" %s: %s -> a\n" % (f, msg)) - z += 1 - progress(_updating, z, item=f, total=numupdates, unit=_files) + progress.increment(item=f) # re-add/mark as modified (manifest only, just log it) for f, args, msg in actions[ACTION_ADD_MODIFIED]: repo.ui.debug(" %s: %s -> am\n" % (f, msg)) - z += 1 - progress(_updating, z, item=f, total=numupdates, unit=_files) + progress.increment(item=f) # keep (noop, just log it) for f, args, msg in actions[ACTION_KEEP]: @@ -1652,8 +1673,7 @@ def applyupdates(repo, actions, wctx, mc # directory rename, move local for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]: repo.ui.debug(" %s: %s -> dm\n" % (f, msg)) - z += 1 - progress(_updating, z, item=f, total=numupdates, unit=_files) + progress.increment(item=f) f0, flags = args repo.ui.note(_("moving %s to %s\n") % (f0, f)) wctx[f].audit() @@ -1664,8 +1684,7 @@ def applyupdates(repo, actions, wctx, mc # local directory rename, get for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]: repo.ui.debug(" %s: %s -> dg\n" % (f, msg)) - z += 1 - progress(_updating, z, item=f, total=numupdates, unit=_files) + progress.increment(item=f) f0, flags = args repo.ui.note(_("getting %s to %s\n") % (f0, f)) wctx[f].write(mctx.filectx(f0).data(), flags) @@ -1674,8 +1693,7 @@ def applyupdates(repo, actions, wctx, mc # exec for f, args, msg in actions[ACTION_EXEC]: repo.ui.debug(" %s: %s -> e\n" % (f, msg)) - z += 1 - progress(_updating, z, item=f, total=numupdates, unit=_files) + progress.increment(item=f) flags, = args wctx[f].audit() wctx[f].setflags('l' in flags, 'x' in flags) @@ -1710,8 +1728,7 @@ def applyupdates(repo, actions, wctx, mc tocomplete = [] for f, args, msg in mergeactions: repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg)) - z += 1 - progress(_updating, z, item=f, total=numupdates, unit=_files) + progress.increment(item=f) if f == '.hgsubstate': # subrepo states need updating subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels) @@ -1725,8 +1742,7 @@ def applyupdates(repo, actions, wctx, mc # merge for f, args, msg in tocomplete: repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg)) - z += 1 - progress(_updating, z, item=f, total=numupdates, unit=_files) + progress.increment(item=f, total=numupdates) ms.resolve(f, wctx) finally: @@ -1774,7 +1790,7 @@ def applyupdates(repo, actions, wctx, mc actions[ACTION_MERGE] = [a for a in actions[ACTION_MERGE] if a[0] in mfiles] - progress(_updating, None, total=numupdates, unit=_files) + progress.complete() return updateresult(updated, merged, removed, unresolved) def recordupdates(repo, actions, branchmerge): @@ -2181,7 +2197,8 @@ def update(repo, node, branchmerge, forc error=stats.unresolvedcount) 
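# The applyupdates() hunks above drop the hand-maintained z counter and
# the _updating/_files strings in favour of one scoped progress object.
# A minimal stand-in exposing only the increment()/complete() surface
# used by this patch (assumption: it mirrors ui.makeprogress() just
# closely enough for illustration):
class progresstracker(object):
    def __init__(self, topic, unit=None, total=None):
        self.topic, self.unit, self.total = topic, unit, total
        self.pos = 0
    def increment(self, step=1, item='', total=None):
        if total is not None:
            self.total = total
        self.pos += step
    def complete(self):
        self.pos = None  # signal "done" to whatever renders the bar

progress = progresstracker('updating', unit='files', total=3)
for f in ('a', 'b', 'c'):
    progress.increment(item=f)
progress.complete()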
return stats -def graft(repo, ctx, pctx, labels, keepparent=False): +def graft(repo, ctx, pctx, labels, keepparent=False, + keepconflictparent=False): """Do a graft-like merge. This is a merge where the merge ancestor is chosen such that one @@ -2194,6 +2211,7 @@ def graft(repo, ctx, pctx, labels, keepp pctx - merge base, usually ctx.p1() labels - merge labels eg ['local', 'graft'] keepparent - keep second parent if any + keepconflictparent - if unresolved, keep parent used for the merge """ # If we're grafting a descendant onto an ancestor, be sure to pass @@ -2207,11 +2225,15 @@ def graft(repo, ctx, pctx, labels, keepp stats = update(repo, ctx.node(), True, True, pctx.node(), mergeancestor=mergeancestor, labels=labels) - pother = nullid - parents = ctx.parents() - if keepparent and len(parents) == 2 and pctx in parents: - parents.remove(pctx) - pother = parents[0].node() + + if keepconflictparent and stats.unresolvedcount: + pother = ctx.node() + else: + pother = nullid + parents = ctx.parents() + if keepparent and len(parents) == 2 and pctx in parents: + parents.remove(pctx) + pother = parents[0].node() with repo.dirstate.parentchange(): repo.setparents(repo['.'].node(), pother) diff --git a/mercurial/minifileset.py b/mercurial/minifileset.py --- a/mercurial/minifileset.py +++ b/mercurial/minifileset.py @@ -11,8 +11,14 @@ from .i18n import _ from . import ( error, fileset, + pycompat, ) +def _sizep(x): + # i18n: "size" is a keyword + expr = fileset.getstring(x, _("size requires an expression")) + return fileset.sizematcher(expr) + def _compile(tree): if not tree: raise error.ParseError(_("missing argument")) @@ -21,14 +27,15 @@ def _compile(tree): name = fileset.getpattern(tree, {'path'}, _('invalid file pattern')) if name.startswith('**'): # file extension test, ex. "**.tar.gz" ext = name[2:] - for c in ext: + for c in pycompat.bytestr(ext): if c in '*{}[]?/\\': raise error.ParseError(_('reserved character: %s') % c) return lambda n, s: n.endswith(ext) elif name.startswith('path:'): # directory or full path test p = name[5:] # prefix pl = len(p) - f = lambda n, s: n.startswith(p) and (len(n) == pl or n[pl] == '/') + f = lambda n, s: n.startswith(p) and (len(n) == pl + or n[pl:pl + 1] == '/') return f raise error.ParseError(_("unsupported file pattern: %s") % name, hint=_('paths must be prefixed with "path:"')) @@ -48,7 +55,7 @@ def _compile(tree): symbols = { 'all': lambda n, s: True, 'none': lambda n, s: False, - 'size': lambda n, s: fileset.sizematcher(tree[2])(s), + 'size': lambda n, s: _sizep(tree[2])(s), } name = fileset.getsymbol(tree[1]) diff --git a/mercurial/minirst.py b/mercurial/minirst.py --- a/mercurial/minirst.py +++ b/mercurial/minirst.py @@ -169,7 +169,7 @@ def splitparagraphs(blocks): if not itemre.match(line1): return False if singleline: - return line2 == '' or line2[0] == ' ' or itemre.match(line2) + return line2 == '' or line2[0:1] == ' ' or itemre.match(line2) else: return line2.startswith(' ') diff --git a/mercurial/namespaces.py b/mercurial/namespaces.py --- a/mercurial/namespaces.py +++ b/mercurial/namespaces.py @@ -95,21 +95,16 @@ class namespaces(object): def singlenode(self, repo, name): """ - Return the 'best' node for the given name. Best means the first node - in the first nonempty list returned by a name-to-nodes mapping function - in the defined precedence order. + Return the 'best' node for the given name. What's best is defined + by the namespace's singlenode() function. The first match returned by + a namespace in the defined precedence order is used.
Raises a KeyError if there is no such node. """ for ns, v in self._names.iteritems(): - n = v.namemap(repo, name) + n = v.singlenode(repo, name) if n: - # return max revision number - if len(n) > 1: - cl = repo.changelog - maxrev = max(cl.rev(node) for node in n) - return cl.node(maxrev) - return n[0] + return n raise KeyError(_('no such name: %s') % name) class namespace(object): @@ -142,7 +137,7 @@ class namespace(object): def __init__(self, name, templatename=None, logname=None, colorname=None, logfmt=None, listnames=None, namemap=None, nodemap=None, - deprecated=None, builtin=False): + deprecated=None, builtin=False, singlenode=None): """create a namespace name: the namespace to be registered (in plural form) @@ -158,6 +153,7 @@ class namespace(object): nodemap: function that inputs a node, output name(s) deprecated: set of names to be masked for ordinary use builtin: whether namespace is implemented by core Mercurial + singlenode: function that inputs a name, output best node (or None) """ self.name = name self.templatename = templatename @@ -167,6 +163,8 @@ class namespace(object): self.listnames = listnames self.namemap = namemap self.nodemap = nodemap + if singlenode: + self.singlenode = singlenode # if logname is not specified, use the template name as backup if self.logname is None: @@ -199,3 +197,18 @@ class namespace(object): """ return sorted(self.namemap(repo, name)) + + def singlenode(self, repo, name): + """returns the best node for the given name + + By default, the best node is the node from nodes() with the highest + revision number. It can be overridden by the namespace.""" + n = self.namemap(repo, name) + if n: + # return max revision number + if len(n) > 1: + cl = repo.changelog + maxrev = max(cl.rev(node) for node in n) + return cl.node(maxrev) + return n[0] + return None diff --git a/mercurial/obsolete.py b/mercurial/obsolete.py --- a/mercurial/obsolete.py +++ b/mercurial/obsolete.py @@ -74,11 +74,13 @@ import struct from .i18n import _ from .
import ( + encoding, error, node, obsutil, phases, policy, + pycompat, util, ) from .utils import dateutil @@ -526,7 +528,7 @@ class obsstore(object): # prec: nodeid, predecessors changesets # succs: tuple of nodeid, successor changesets (0-N length) # flag: integer, flag field carrying modifier for the markers (see doc) - # meta: binary blob, encoded metadata dictionary + # meta: binary blob in UTF-8, encoded metadata dictionary # date: (float, int) tuple, date of marker creation # parents: (tuple of nodeid) or None, parents of predecessors # None is used when no data has been recorded @@ -599,6 +601,16 @@ class obsstore(object): raise ValueError(_('in-marker cycle with %s') % node.hex(prec)) metadata = tuple(sorted(metadata.iteritems())) + for k, v in metadata: + try: + # might be better to reject non-ASCII keys + k.decode('utf-8') + v.decode('utf-8') + except UnicodeDecodeError: + raise error.ProgrammingError( + 'obsstore metadata must be valid UTF-8 sequence ' + '(key = %r, value = %r)' + % (pycompat.bytestr(k), pycompat.bytestr(v))) marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents) return bool(self.add(transaction, [marker])) @@ -853,7 +865,7 @@ def clearobscaches(repo): def _mutablerevs(repo): """the set of mutable revision in the repository""" - return repo._phasecache.getrevset(repo, (phases.draft, phases.secret)) + return repo._phasecache.getrevset(repo, phases.mutablephases) @cachefor('obsolete') def _computeobsoleteset(repo): @@ -950,7 +962,8 @@ def createmarkers(repo, relations, flag= <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}]) tuple. `old` and `news` are changectx. metadata is an optional dictionary containing metadata for this marker only. It is merged with the global - metadata specified through the `metadata` argument of this function, + metadata specified through the `metadata` argument of this function. + Any string values in metadata must be UTF-8 bytes. Trying to obsolete a public changeset will raise an exception. @@ -964,11 +977,8 @@ def createmarkers(repo, relations, flag= if metadata is None: metadata = {} if 'user' not in metadata: - develuser = repo.ui.config('devel', 'user.obsmarker') - if develuser: - metadata['user'] = develuser - else: - metadata['user'] = repo.ui.username() + luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username() + metadata['user'] = encoding.fromlocal(luser) # Operation metadata handling useoperation = repo.ui.configbool('experimental', diff --git a/mercurial/obsutil.py b/mercurial/obsutil.py --- a/mercurial/obsutil.py +++ b/mercurial/obsutil.py @@ -11,11 +11,15 @@ import re from .i18n import _ from . import ( + diffutil, + encoding, node as nodemod, phases, util, ) -from .utils import dateutil +from .utils import ( + dateutil, +) ### obsolescence marker flag @@ -392,13 +396,13 @@ def _cmpdiff(leftctx, rightctx): This is a first and basic implementation, with many shortcomings.
""" - + diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True}) # Leftctx or right ctx might be filtered, so we need to use the contexts # with an unfiltered repository to safely compute the diff leftunfi = leftctx._repo.unfiltered()[leftctx.rev()] - leftdiff = leftunfi.diff(git=1) + leftdiff = leftunfi.diff(opts=diffopts) rightunfi = rightctx._repo.unfiltered()[rightctx.rev()] - rightdiff = rightunfi.diff(git=1) + rightdiff = rightunfi.diff(opts=diffopts) left, right = (0, 0) while None not in (left, right): @@ -819,7 +823,8 @@ def markersusers(markers): """ Returns a sorted list of markers users without duplicates """ markersmeta = [dict(m[3]) for m in markers] - users = set(meta.get('user') for meta in markersmeta if meta.get('user')) + users = set(encoding.tolocal(meta['user']) for meta in markersmeta + if meta.get('user')) return sorted(users) diff --git a/mercurial/patch.py b/mercurial/patch.py --- a/mercurial/patch.py +++ b/mercurial/patch.py @@ -18,7 +18,6 @@ import os import posixpath import re import shutil -import tempfile import zlib from .i18n import _ @@ -29,6 +28,7 @@ from .node import ( from . import ( copies, diffhelper, + diffutil, encoding, error, mail, @@ -51,7 +51,7 @@ stringio = util.stringio gitre = re.compile(br'diff --git a/(.*) b/(.*)') tabsplitter = re.compile(br'(\t+|[^\t]+)') wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|' - '[^ \ta-zA-Z0-9_\x80-\xff])') + b'[^ \ta-zA-Z0-9_\x80-\xff])') PatchError = error.PatchError @@ -113,7 +113,7 @@ def split(stream): cur.append(line) c = chunk(cur) - m = pycompat.emailparser().parse(c) + m = mail.parse(c) if not m.is_multipart(): yield msgfp(m) else: @@ -211,7 +211,7 @@ def extract(ui, fileobj): Any item can be missing from the dictionary. If filename is missing, fileobj did not contain a patch. Caller must unlink filename when done.''' - fd, tmpname = tempfile.mkstemp(prefix='hg-patch-') + fd, tmpname = pycompat.mkstemp(prefix='hg-patch-') tmpfp = os.fdopen(fd, r'wb') try: yield _extract(ui, fileobj, tmpname, tmpfp) @@ -231,7 +231,7 @@ def _extract(ui, fileobj, tmpname, tmpfp data = {} - msg = pycompat.emailparser().parse(fileobj) + msg = mail.parse(fileobj) subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject']) data['user'] = msg[r'From'] and mail.headdecode(msg[r'From']) @@ -498,7 +498,8 @@ class fsbackend(abstractbackend): self.opener.setflags(fname, False, True) def unlink(self, fname): - self.opener.unlinkpath(fname, ignoremissing=True) + rmdir = self.ui.configbool('experimental', 'removeemptydirs') + self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir) def writerej(self, fname, failed, total, lines): fname = fname + ".rej" @@ -573,7 +574,7 @@ class filestore(object): self.size += len(data) else: if self.opener is None: - root = tempfile.mkdtemp(prefix='hg-patch-') + root = pycompat.mkdtemp(prefix='hg-patch-') self.opener = vfsmod.vfs(root) # Avoid filename issues with these simple names fn = '%d' % self.created @@ -708,7 +709,7 @@ class patchfile(object): if self.eolmode != 'strict' and eol and eol != '\n': rawlines = [] for l in lines: - if l and l[-1] == '\n': + if l and l.endswith('\n'): l = l[:-1] + eol rawlines.append(l) lines = rawlines @@ -1109,7 +1110,7 @@ file will be generated: you can use that all lines of the hunk are removed, then the edit is aborted and the hunk is left unchanged. 
""") - (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-", + (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-", suffix=".diff") ncpatchfp = None try: @@ -1946,7 +1947,7 @@ def applybindelta(binchunk, data): """ def deltahead(binchunk): i = 0 - for c in binchunk: + for c in pycompat.bytestr(binchunk): i += 1 if not (ord(c) & 0x80): return i @@ -1958,31 +1959,31 @@ def applybindelta(binchunk, data): binchunk = binchunk[s:] i = 0 while i < len(binchunk): - cmd = ord(binchunk[i]) + cmd = ord(binchunk[i:i + 1]) i += 1 if (cmd & 0x80): offset = 0 size = 0 if (cmd & 0x01): - offset = ord(binchunk[i]) + offset = ord(binchunk[i:i + 1]) i += 1 if (cmd & 0x02): - offset |= ord(binchunk[i]) << 8 + offset |= ord(binchunk[i:i + 1]) << 8 i += 1 if (cmd & 0x04): - offset |= ord(binchunk[i]) << 16 + offset |= ord(binchunk[i:i + 1]) << 16 i += 1 if (cmd & 0x08): - offset |= ord(binchunk[i]) << 24 + offset |= ord(binchunk[i:i + 1]) << 24 i += 1 if (cmd & 0x10): - size = ord(binchunk[i]) + size = ord(binchunk[i:i + 1]) i += 1 if (cmd & 0x20): - size |= ord(binchunk[i]) << 8 + size |= ord(binchunk[i:i + 1]) << 8 i += 1 if (cmd & 0x40): - size |= ord(binchunk[i]) << 16 + size |= ord(binchunk[i:i + 1]) << 16 i += 1 if size == 0: size = 0x10000 @@ -2113,6 +2114,7 @@ def _externalpatch(ui, repo, patcher, pa args.append('-d %s' % procutil.shellquote(cwd)) cmd = ('%s %s -p%d < %s' % (patcher, ' '.join(args), strip, procutil.shellquote(patchname))) + ui.debug('Using external patch tool: %s\n' % cmd) fp = procutil.popen(cmd, 'rb') try: for line in util.iterfile(fp): @@ -2231,95 +2233,9 @@ def changedfiles(ui, repo, patchpath, st class GitDiffRequired(Exception): pass -def diffallopts(ui, opts=None, untrusted=False, section='diff'): - '''return diffopts with all features supported and parsed''' - return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section, - git=True, whitespace=True, formatchanging=True) - -diffopts = diffallopts - -def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False, - whitespace=False, formatchanging=False): - '''return diffopts with only opted-in features parsed - - Features: - - git: git-style diffs - - whitespace: whitespace options like ignoreblanklines and ignorews - - formatchanging: options that will likely break or cause correctness issues - with most diff parsers - ''' - def get(key, name=None, getter=ui.configbool, forceplain=None): - if opts: - v = opts.get(key) - # diffopts flags are either None-default (which is passed - # through unchanged, so we can identify unset values), or - # some other falsey default (eg --unified, which defaults - # to an empty string). We only want to override the config - # entries from hgrc with command line values if they - # appear to have been set, which is any truthy value, - # True, or False. 
- if v or isinstance(v, bool): - return v - if forceplain is not None and ui.plain(): - return forceplain - return getter(section, name or key, untrusted=untrusted) - - # core options, expected to be understood by every diff parser - buildopts = { - 'nodates': get('nodates'), - 'showfunc': get('show_function', 'showfunc'), - 'context': get('unified', getter=ui.config), - } - buildopts['worddiff'] = ui.configbool('experimental', 'worddiff') - buildopts['xdiff'] = ui.configbool('experimental', 'xdiff') - - if git: - buildopts['git'] = get('git') - - # since this is in the experimental section, we need to call - # ui.configbool directory - buildopts['showsimilarity'] = ui.configbool('experimental', - 'extendedheader.similarity') - - # need to inspect the ui object instead of using get() since we want to - # test for an int - hconf = ui.config('experimental', 'extendedheader.index') - if hconf is not None: - hlen = None - try: - # the hash config could be an integer (for length of hash) or a - # word (e.g. short, full, none) - hlen = int(hconf) - if hlen < 0 or hlen > 40: - msg = _("invalid length for extendedheader.index: '%d'\n") - ui.warn(msg % hlen) - except ValueError: - # default value - if hconf == 'short' or hconf == '': - hlen = 12 - elif hconf == 'full': - hlen = 40 - elif hconf != 'none': - msg = _("invalid value for extendedheader.index: '%s'\n") - ui.warn(msg % hconf) - finally: - buildopts['index'] = hlen - - if whitespace: - buildopts['ignorews'] = get('ignore_all_space', 'ignorews') - buildopts['ignorewsamount'] = get('ignore_space_change', - 'ignorewsamount') - buildopts['ignoreblanklines'] = get('ignore_blank_lines', - 'ignoreblanklines') - buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol') - if formatchanging: - buildopts['text'] = opts and opts.get('text') - binary = None if opts is None else opts.get('binary') - buildopts['nobinary'] = (not binary if binary is not None - else get('nobinary', forceplain=False)) - buildopts['noprefix'] = get('noprefix', forceplain=False) - - return mdiff.diffopts(**pycompat.strkwargs(buildopts)) +diffopts = diffutil.diffallopts +diffallopts = diffutil.diffallopts +difffeatureopts = diffutil.difffeatureopts def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None, losedatafn=None, prefix='', relroot='', copy=None, @@ -2489,17 +2405,17 @@ def diffsinglehunk(hunklines): """yield tokens for a list of lines in a single hunk""" for line in hunklines: # chomp - chompline = line.rstrip('\n') + chompline = line.rstrip('\r\n') # highlight tabs and trailing whitespace stripline = chompline.rstrip() - if line[0] == '-': + if line.startswith('-'): label = 'diff.deleted' - elif line[0] == '+': + elif line.startswith('+'): label = 'diff.inserted' else: raise error.ProgrammingError('unexpected hunk line: %s' % line) for token in tabsplitter.findall(stripline): - if '\t' == token[0]: + if token.startswith('\t'): yield (token, 'diff.tab') else: yield (token, label) @@ -2557,6 +2473,9 @@ def diffsinglehunkinline(hunklines): isendofline = token.endswith('\n') if isendofline: chomp = token[:-1] # chomp + if chomp.endswith('\r'): + chomp = chomp[:-1] + endofline = token[len(chomp):] token = chomp.rstrip() # detect spaces at the end endspaces = chomp[len(token):] # scan tabs @@ -2572,7 +2491,7 @@ def diffsinglehunkinline(hunklines): if isendofline: if endspaces: yield (endspaces, 'diff.trailingwhitespace') - yield ('\n', '') + yield (endofline, '') nextisnewline = True def difflabel(func, *args, **kw): diff --git 
a/mercurial/pathutil.py b/mercurial/pathutil.py --- a/mercurial/pathutil.py +++ b/mercurial/pathutil.py @@ -226,7 +226,7 @@ def canonpath(root, cwd, myname, auditor if cwd != root: canonpath(root, root, myname, auditor) relpath = util.pathto(root, cwd, '') - if relpath[-1] == pycompat.ossep: + if relpath.endswith(pycompat.ossep): relpath = relpath[:-1] hint = (_("consider using '--cwd %s'") % relpath) except error.Abort: diff --git a/mercurial/phases.py b/mercurial/phases.py --- a/mercurial/phases.py +++ b/mercurial/phases.py @@ -126,6 +126,8 @@ from . import ( allphases = public, draft, secret = range(3) trackedphases = allphases[1:] phasenames = ['public', 'draft', 'secret'] +mutablephases = tuple(allphases[1:]) +remotehiddenphases = tuple(allphases[2:]) def _readroots(repo, phasedefaults=None): """Read phase roots from disk @@ -352,10 +354,14 @@ class phasecache(object): _trackphasechange(phasetracking, rev, None, revphase) repo.invalidatevolatilesets() - def advanceboundary(self, repo, tr, targetphase, nodes): + def advanceboundary(self, repo, tr, targetphase, nodes, dryrun=None): """Set all 'nodes' to phase 'targetphase' Nodes with a phase lower than 'targetphase' are not affected. + + If dryrun is True, no actions will be performed + + Returns a set of revs whose phase is changed or should be changed """ # Be careful to preserve shallow-copied values: do not update # phaseroots values, replace them. @@ -366,6 +372,7 @@ class phasecache(object): repo = repo.unfiltered() + changes = set() # set of revisions to be changed delroots = [] # set of root deleted by this path for phase in xrange(targetphase + 1, len(allphases)): # filter nodes that are not in a compatible phase already @@ -377,6 +384,9 @@ class phasecache(object): olds = self.phaseroots[phase] affected = repo.revs('%ln::%ln', olds, nodes) + changes.update(affected) + if dryrun: + continue for r in affected: _trackphasechange(phasetracking, r, self.phase(repo, r), targetphase) @@ -387,10 +397,12 @@ class phasecache(object): self._updateroots(phase, roots, tr) # some roots may need to be declared for lower phases delroots.extend(olds - roots) - # declare deleted root in the target phase - if targetphase != 0: - self._retractboundary(repo, tr, targetphase, delroots) - repo.invalidatevolatilesets() + if not dryrun: + # declare deleted root in the target phase + if targetphase != 0: + self._retractboundary(repo, tr, targetphase, delroots) + repo.invalidatevolatilesets() + return changes def retractboundary(self, repo, tr, targetphase, nodes): oldroots = self.phaseroots[:targetphase + 1] @@ -478,16 +490,24 @@ class phasecache(object): # (see branchmap one) self.invalidate() -def advanceboundary(repo, tr, targetphase, nodes): +def advanceboundary(repo, tr, targetphase, nodes, dryrun=None): """Add nodes to a phase changing other nodes phases if necessary. This function move boundary *forward* this means that all nodes are set in the target phase or kept in a *lower* phase. - Simplify boundary to contains phase roots only.""" + Simplify boundary to contains phase roots only. 
+ + If dryrun is True, no actions will be performed + + Returns a set of revs whose phase is changed or should be changed + """ phcache = repo._phasecache.copy() - phcache.advanceboundary(repo, tr, targetphase, nodes) - repo._phasecache.replace(phcache) + changes = phcache.advanceboundary(repo, tr, targetphase, nodes, + dryrun=dryrun) + if not dryrun: + repo._phasecache.replace(phcache) + return changes def retractboundary(repo, tr, targetphase, nodes): """Set nodes back to a phase changing other nodes phases if @@ -645,10 +665,8 @@ def newheads(repo, heads, roots): * `heads`: define the first subset * `roots`: define the second we subtract from the first""" repo = repo.unfiltered() - revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))', - heads, roots, roots, heads) - return [c.node() for c in revset] - + revs = repo.revs('heads(::%ln - (%ln::%ln))', heads, roots, heads) + return pycompat.maplist(repo.changelog.node, revs) def newcommitphase(ui): """helper to get the target phase of new commit diff --git a/mercurial/policy.py b/mercurial/policy.py --- a/mercurial/policy.py +++ b/mercurial/policy.py @@ -69,7 +69,7 @@ def _importfrom(pkgname, modname): (r'cext', r'bdiff'): 3, (r'cext', r'mpatch'): 1, (r'cext', r'osutil'): 4, - (r'cext', r'parsers'): 4, + (r'cext', r'parsers'): 5, } # map import request to other package or module diff --git a/mercurial/posix.py b/mercurial/posix.py --- a/mercurial/posix.py +++ b/mercurial/posix.py @@ -216,7 +216,7 @@ def checkexec(path): # check directly in path and don't leave checkisexec behind checkdir = path checkisexec = None - fh, fn = tempfile.mkstemp(dir=checkdir, prefix='hg-checkexec-') + fh, fn = pycompat.mkstemp(dir=checkdir, prefix='hg-checkexec-') try: os.close(fh) m = os.stat(fn).st_mode @@ -249,16 +249,15 @@ def checklink(path): else: checkdir = path cachedir = None - fscheckdir = pycompat.fsdecode(checkdir) - name = tempfile.mktemp(dir=fscheckdir, + name = tempfile.mktemp(dir=pycompat.fsdecode(checkdir), prefix=r'checklink-') name = pycompat.fsencode(name) try: fd = None if cachedir is None: - fd = tempfile.NamedTemporaryFile(dir=fscheckdir, - prefix=r'hg-checklink-') - target = pycompat.fsencode(os.path.basename(fd.name)) + fd = pycompat.namedtempfile(dir=checkdir, + prefix='hg-checklink-') + target = os.path.basename(fd.name) else: # create a fixed file to link to; doesn't matter if it # already exists. @@ -287,7 +286,7 @@ def checklink(path): return True except OSError as inst: # link creation might race, try again - if inst[0] == errno.EEXIST: + if inst.errno == errno.EEXIST: continue raise finally: @@ -297,7 +296,7 @@ def checklink(path): return False except OSError as inst: # sshfs might report failure while successfully creating the link - if inst[0] == errno.EIO and os.path.exists(name): + if inst.errno == errno.EIO and os.path.exists(name): unlink(name) return False @@ -542,9 +541,9 @@ def username(uid=None): if uid is None: uid = os.getuid() try: - return pwd.getpwuid(uid)[0] + return pycompat.fsencode(pwd.getpwuid(uid)[0]) except KeyError: - return str(uid) + return b'%d' % uid def groupname(gid=None): """Return the name of the group with the given gid. 
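A hedged usage sketch of the dry-run mode documented in the phases.py hunks above; repo, tr and nodes stand for an existing repository, transaction and node list:

    from mercurial import phases

    # First ask which revisions the boundary move would touch...
    changed = phases.advanceboundary(repo, tr, phases.public, nodes,
                                     dryrun=True)
    if changed:
        # ...then perform the same move for real.
        phases.advanceboundary(repo, tr, phases.public, nodes)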
diff --git a/mercurial/profiling.py b/mercurial/profiling.py --- a/mercurial/profiling.py +++ b/mercurial/profiling.py @@ -101,7 +101,8 @@ def statprofile(ui, fp): else: ui.warn(_("invalid sampling frequency '%s' - ignoring\n") % freq) - statprof.start(mechanism='thread') + track = ui.config('profiling', 'time-track') + statprof.start(mechanism='thread', track=track) try: yield diff --git a/mercurial/progress.py b/mercurial/progress.py --- a/mercurial/progress.py +++ b/mercurial/progress.py @@ -264,39 +264,40 @@ class progbar(object): self.starttimes[topic] = now - interval def progress(self, topic, pos, item='', unit='', total=None): + if pos is None: + self.closetopic(topic) + return now = time.time() - self._refreshlock.acquire() - try: - if pos is None: - self.starttimes.pop(topic, None) - self.startvals.pop(topic, None) - self.topicstates.pop(topic, None) - # reset the progress bar if this is the outermost topic - if self.topics and self.topics[0] == topic and self.printed: - self.complete() - self.resetstate() - # truncate the list of topics assuming all topics within - # this one are also closed - if topic in self.topics: - self.topics = self.topics[:self.topics.index(topic)] - # reset the last topic to the one we just unwound to, - # so that higher-level topics will be stickier than - # lower-level topics - if self.topics: - self.lasttopic = self.topics[-1] - else: - self.lasttopic = None - else: - if topic not in self.topics: - self.starttimes[topic] = now - self.startvals[topic] = pos - self.topics.append(topic) - self.topicstates[topic] = pos, item, unit, total - self.curtopic = topic - self._calibrateestimate(topic, now, pos) - if now - self.lastprint >= self.refresh and self.topics: - if self._oktoprint(now): - self.lastprint = now - self.show(now, topic, *self.topicstates[topic]) - finally: - self._refreshlock.release() + with self._refreshlock: + if topic not in self.topics: + self.starttimes[topic] = now + self.startvals[topic] = pos + self.topics.append(topic) + self.topicstates[topic] = pos, item, unit, total + self.curtopic = topic + self._calibrateestimate(topic, now, pos) + if now - self.lastprint >= self.refresh and self.topics: + if self._oktoprint(now): + self.lastprint = now + self.show(now, topic, *self.topicstates[topic]) + + def closetopic(self, topic): + with self._refreshlock: + self.starttimes.pop(topic, None) + self.startvals.pop(topic, None) + self.topicstates.pop(topic, None) + # reset the progress bar if this is the outermost topic + if self.topics and self.topics[0] == topic and self.printed: + self.complete() + self.resetstate() + # truncate the list of topics assuming all topics within + # this one are also closed + if topic in self.topics: + self.topics = self.topics[:self.topics.index(topic)] + # reset the last topic to the one we just unwound to, + # so that higher-level topics will be stickier than + # lower-level topics + if self.topics: + self.lasttopic = self.topics[-1] + else: + self.lasttopic = None diff --git a/mercurial/pycompat.py b/mercurial/pycompat.py --- a/mercurial/pycompat.py +++ b/mercurial/pycompat.py @@ -15,6 +15,7 @@ import inspect import os import shlex import sys +import tempfile ispy3 = (sys.version_info[0] >= 3) ispypy = (r'__pypy__' in sys.builtin_module_names) @@ -23,7 +24,7 @@ if not ispy3: import cookielib import cPickle as pickle import httplib - import Queue as _queue + import Queue as queue import SocketServer as socketserver import xmlrpclib @@ -36,19 +37,49 @@ else: import http.cookiejar as cookielib import 
http.client as httplib import pickle - import queue as _queue + import queue as queue import socketserver import xmlrpc.client as xmlrpclib def future_set_exception_info(f, exc_info): f.set_exception(exc_info[0]) -empty = _queue.Empty -queue = _queue.Queue - def identity(a): return a +def _rapply(f, xs): + if xs is None: + # assume None means non-value of optional data + return xs + if isinstance(xs, (list, set, tuple)): + return type(xs)(_rapply(f, x) for x in xs) + if isinstance(xs, dict): + return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items()) + return f(xs) + +def rapply(f, xs): + """Apply function recursively to every item preserving the data structure + + >>> def f(x): + ... return 'f(%s)' % x + >>> rapply(f, None) is None + True + >>> rapply(f, 'a') + 'f(a)' + >>> rapply(f, {'a'}) == {'f(a)'} + True + >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []]) + ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []] + + >>> xs = [object()] + >>> rapply(identity, xs) is xs + True + """ + if f is identity: + # fast path mainly for py2 + return xs + return _rapply(f, xs) + if ispy3: import builtins import functools @@ -297,13 +328,10 @@ if ispy3: ret = shlex.split(s.decode('latin-1'), comments, posix) return [a.encode('latin-1') for a in ret] - def emailparser(*args, **kwargs): - import email.parser - return email.parser.BytesParser(*args, **kwargs) - else: import cStringIO + unicode = unicode bytechr = chr byterepr = repr bytestr = str @@ -372,10 +400,6 @@ else: rawinput = raw_input getargspec = inspect.getargspec - def emailparser(*args, **kwargs): - import email.parser - return email.parser.Parser(*args, **kwargs) - isjython = sysplatform.startswith('java') isdarwin = sysplatform == 'darwin' @@ -387,3 +411,18 @@ def getoptb(args, shortlist, namelist): def gnugetoptb(args, shortlist, namelist): return _getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist) + +def mkdtemp(suffix=b'', prefix=b'tmp', dir=None): + return tempfile.mkdtemp(suffix, prefix, dir) + +# text=True is not supported; use util.from/tonativeeol() instead +def mkstemp(suffix=b'', prefix=b'tmp', dir=None): + return tempfile.mkstemp(suffix, prefix, dir) + +# mode must include 'b'ytes as encoding= is not supported +def namedtempfile(mode=b'w+b', bufsize=-1, suffix=b'', prefix=b'tmp', dir=None, + delete=True): + mode = sysstr(mode) + assert r'b' in mode + return tempfile.NamedTemporaryFile(mode, bufsize, suffix=suffix, + prefix=prefix, dir=dir, delete=delete) diff --git a/mercurial/registrar.py b/mercurial/registrar.py --- a/mercurial/registrar.py +++ b/mercurial/registrar.py @@ -247,10 +247,6 @@ class filesetpredicate(_funcregistrarbas implies 'matchctx.status()' at runtime or not (False, by default). - Optional argument 'callexisting' indicates whether a predicate - implies 'matchctx.existing()' at runtime or not (False, by - default). - 'filesetpredicate' instance in example above can be used to decorate multiple functions. 
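A short usage sketch of the pycompat tempfile wrappers introduced above; prefixes and suffixes are bytes because Mercurial handles all paths as bytes:

    import os
    from mercurial import pycompat

    fd, path = pycompat.mkstemp(prefix=b'hg-example-', suffix=b'.tmp')
    os.close(fd)
    with pycompat.namedtempfile(prefix=b'hg-example-') as fp:
        fp.write(b'payload')  # mode always includes 'b', so bytes only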
@@ -263,9 +259,8 @@ class filesetpredicate(_funcregistrarbas _getname = _funcregistrarbase._parsefuncdecl _docformat = "``%s``\n %s" - def _extrasetup(self, name, func, callstatus=False, callexisting=False): + def _extrasetup(self, name, func, callstatus=False): func._callstatus = callstatus - func._callexisting = callexisting class _templateregistrarbase(_funcregistrarbase): """Base of decorator to register functions as template specific one @@ -351,7 +346,8 @@ class templatefunc(_templateregistrarbas templatefunc = registrar.templatefunc() - @templatefunc('myfunc(arg1, arg2[, arg3])', argspec='arg1 arg2 arg3') + @templatefunc('myfunc(arg1, arg2[, arg3])', argspec='arg1 arg2 arg3', + requires={'ctx'}) def myfuncfunc(context, mapping, args): '''Explanation of this template function .... ''' @@ -363,6 +359,9 @@ class templatefunc(_templateregistrarbas a dict of named arguments. Otherwise 'args' is a list of positional arguments. + Optional argument 'requires' should be a collection of resource names + which the template function depends on. + 'templatefunc' instance in example above can be used to decorate multiple functions. @@ -374,8 +373,9 @@ class templatefunc(_templateregistrarbas """ _getname = _funcregistrarbase._parsefuncdecl - def _extrasetup(self, name, func, argspec=None): + def _extrasetup(self, name, func, argspec=None, requires=()): func._argspec = argspec + func._requires = requires class internalmerge(_funcregistrarbase): """Decorator to register in-process merge tool diff --git a/mercurial/repair.py b/mercurial/repair.py --- a/mercurial/repair.py +++ b/mercurial/repair.py @@ -186,76 +186,77 @@ def strip(ui, repo, nodelist, backup=Tru tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp', compress=False, obsolescence=False) - try: - with repo.transaction("strip") as tr: - offset = len(tr.entries) + with ui.uninterruptable(): + try: + with repo.transaction("strip") as tr: + offset = len(tr.entries) - tr.startgroup() - cl.strip(striprev, tr) - stripmanifest(repo, striprev, tr, files) - - for fn in files: - repo.file(fn).strip(striprev, tr) - tr.endgroup() + tr.startgroup() + cl.strip(striprev, tr) + stripmanifest(repo, striprev, tr, files) - for i in xrange(offset, len(tr.entries)): - file, troffset, ignore = tr.entries[i] - with repo.svfs(file, 'a', checkambig=True) as fp: - fp.truncate(troffset) - if troffset == 0: - repo.store.markremoved(file) + for fn in files: + repo.file(fn).strip(striprev, tr) + tr.endgroup() - deleteobsmarkers(repo.obsstore, stripobsidx) - del repo.obsstore - repo.invalidatevolatilesets() - repo._phasecache.filterunknown(repo) + for i in xrange(offset, len(tr.entries)): + file, troffset, ignore = tr.entries[i] + with repo.svfs(file, 'a', checkambig=True) as fp: + fp.truncate(troffset) + if troffset == 0: + repo.store.markremoved(file) + + deleteobsmarkers(repo.obsstore, stripobsidx) + del repo.obsstore + repo.invalidatevolatilesets() + repo._phasecache.filterunknown(repo) - if tmpbundlefile: - ui.note(_("adding branch\n")) - f = vfs.open(tmpbundlefile, "rb") - gen = exchange.readbundle(ui, f, tmpbundlefile, vfs) - if not repo.ui.verbose: - # silence internal shuffling chatter - repo.ui.pushbuffer() - tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile) - txnname = 'strip' - if not isinstance(gen, bundle2.unbundle20): - txnname = "strip\n%s" % util.hidepassword(tmpbundleurl) - with repo.transaction(txnname) as tr: - bundle2.applybundle(repo, gen, tr, source='strip', - url=tmpbundleurl) - if not repo.ui.verbose: - repo.ui.popbuffer() - 
f.close() + if tmpbundlefile: + ui.note(_("adding branch\n")) + f = vfs.open(tmpbundlefile, "rb") + gen = exchange.readbundle(ui, f, tmpbundlefile, vfs) + if not repo.ui.verbose: + # silence internal shuffling chatter + repo.ui.pushbuffer() + tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile) + txnname = 'strip' + if not isinstance(gen, bundle2.unbundle20): + txnname = "strip\n%s" % util.hidepassword(tmpbundleurl) + with repo.transaction(txnname) as tr: + bundle2.applybundle(repo, gen, tr, source='strip', + url=tmpbundleurl) + if not repo.ui.verbose: + repo.ui.popbuffer() + f.close() - with repo.transaction('repair') as tr: - bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm] - bm.applychanges(repo, tr, bmchanges) + with repo.transaction('repair') as tr: + bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm] + bm.applychanges(repo, tr, bmchanges) - # remove undo files - for undovfs, undofile in repo.undofiles(): - try: - undovfs.unlink(undofile) - except OSError as e: - if e.errno != errno.ENOENT: - ui.warn(_('error removing %s: %s\n') % - (undovfs.join(undofile), - stringutil.forcebytestr(e))) + # remove undo files + for undovfs, undofile in repo.undofiles(): + try: + undovfs.unlink(undofile) + except OSError as e: + if e.errno != errno.ENOENT: + ui.warn(_('error removing %s: %s\n') % + (undovfs.join(undofile), + stringutil.forcebytestr(e))) - except: # re-raises - if backupfile: - ui.warn(_("strip failed, backup bundle stored in '%s'\n") - % vfs.join(backupfile)) - if tmpbundlefile: - ui.warn(_("strip failed, unrecovered changes stored in '%s'\n") - % vfs.join(tmpbundlefile)) - ui.warn(_("(fix the problem, then recover the changesets with " - "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile)) - raise - else: - if tmpbundlefile: - # Remove temporary bundle only if there were no exceptions - vfs.unlink(tmpbundlefile) + except: # re-raises + if backupfile: + ui.warn(_("strip failed, backup bundle stored in '%s'\n") + % vfs.join(backupfile)) + if tmpbundlefile: + ui.warn(_("strip failed, unrecovered changes stored in '%s'\n") + % vfs.join(tmpbundlefile)) + ui.warn(_("(fix the problem, then recover the changesets with " + "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile)) + raise + else: + if tmpbundlefile: + # Remove temporary bundle only if there were no exceptions + vfs.unlink(tmpbundlefile) repo.destroyed() # return the backup file path (or None if 'backup' was False) so @@ -355,10 +356,10 @@ def rebuildfncache(ui, repo): newentries = set() seenfiles = set() - repolen = len(repo) + progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'), + total=len(repo)) for rev in repo: - ui.progress(_('rebuilding'), rev, total=repolen, - unit=_('changesets')) + progress.update(rev) ctx = repo[rev] for f in ctx.files(): @@ -375,7 +376,7 @@ def rebuildfncache(ui, repo): if repo.store._exists(d): newentries.add(d) - ui.progress(_('rebuilding'), None) + progress.complete() if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise for dir in util.dirs(seenfiles): @@ -405,18 +406,6 @@ def rebuildfncache(ui, repo): else: ui.write(_('fncache already up to date\n')) -def stripbmrevset(repo, mark): - """ - The revset to strip when strip is called with -B mark - - Needs to live here so extensions can use it and wrap it even when strip is - not enabled or not present on a box. 
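The rebuildfncache hunk above is one instance of the repository-wide migration from ui.progress(topic, pos, ...) to the progress-helper object; the general pattern, assuming an existing ui and repo, is:

    progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
                               total=len(repo))
    for rev in repo:
        progress.update(rev)
        # ... per-revision work ...
    progress.complete()  # replaces the old ui.progress(topic, None)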
- """ - return repo.revs("ancestors(bookmark(%s)) - " - "ancestors(head() and not bookmark(%s)) - " - "ancestors(bookmark() and not bookmark(%s))", - mark, mark, mark) - def deleteobsmarkers(obsstore, indices): """Delete some obsmarkers from obsstore and return how many were deleted diff --git a/mercurial/repository.py b/mercurial/repository.py --- a/mercurial/repository.py +++ b/mercurial/repository.py @@ -642,6 +642,308 @@ class ifilestorage(ifileindex, ifiledata TODO this is used by verify and it should not be part of the interface. """ +class idirs(interfaceutil.Interface): + """Interface representing a collection of directories from paths. + + This interface is essentially a derived data structure representing + directories from a collection of paths. + """ + + def addpath(path): + """Add a path to the collection. + + All directories in the path will be added to the collection. + """ + + def delpath(path): + """Remove a path from the collection. + + If the removal was the last path in a particular directory, the + directory is removed from the collection. + """ + + def __iter__(): + """Iterate over the directories in this collection of paths.""" + + def __contains__(path): + """Whether a specific directory is in this collection.""" + +class imanifestdict(interfaceutil.Interface): + """Interface representing a manifest data structure. + + A manifest is effectively a dict mapping paths to entries. Each entry + consists of a binary node and extra flags affecting that entry. + """ + + def __getitem__(path): + """Returns the binary node value for a path in the manifest. + + Raises ``KeyError`` if the path does not exist in the manifest. + + Equivalent to ``self.find(path)[0]``. + """ + + def find(path): + """Returns the entry for a path in the manifest. + + Returns a 2-tuple of (node, flags). + + Raises ``KeyError`` if the path does not exist in the manifest. + """ + + def __len__(): + """Return the number of entries in the manifest.""" + + def __nonzero__(): + """Returns True if the manifest has entries, False otherwise.""" + + __bool__ = __nonzero__ + + def __setitem__(path, node): + """Define the node value for a path in the manifest. + + If the path is already in the manifest, its flags will be copied to + the new entry. + """ + + def __contains__(path): + """Whether a path exists in the manifest.""" + + def __delitem__(path): + """Remove a path from the manifest. + + Raises ``KeyError`` if the path is not in the manifest. + """ + + def __iter__(): + """Iterate over paths in the manifest.""" + + def iterkeys(): + """Iterate over paths in the manifest.""" + + def keys(): + """Obtain a list of paths in the manifest.""" + + def filesnotin(other, match=None): + """Obtain the set of paths in this manifest but not in another. + + ``match`` is an optional matcher function to be applied to both + manifests. + + Returns a set of paths. + """ + + def dirs(): + """Returns an object implementing the ``idirs`` interface.""" + + def hasdir(dir): + """Returns a bool indicating if a directory is in this manifest.""" + + def matches(match): + """Generate a new manifest filtered through a matcher. + + Returns an object conforming to the ``imanifestdict`` interface. + """ + + def walk(match): + """Generator of paths in manifest satisfying a matcher. + + This is equivalent to ``self.matches(match).iterkeys()`` except a new + manifest object is not created. + + If the matcher has explicit files listed and they don't exist in + the manifest, ``match.bad()`` is called for each missing file. 
+ """ + + def diff(other, match=None, clean=False): + """Find differences between this manifest and another. + + This manifest is compared to ``other``. + + If ``match`` is provided, the two manifests are filtered against this + matcher and only entries satisfying the matcher are compared. + + If ``clean`` is True, unchanged files are included in the returned + object. + + Returns a dict with paths as keys and values of 2-tuples of 2-tuples of + the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)`` + represents the node and flags for this manifest and ``(node2, flag2)`` + are the same for the other manifest. + """ + + def setflag(path, flag): + """Set the flag value for a given path. + + Raises ``KeyError`` if the path is not already in the manifest. + """ + + def get(path, default=None): + """Obtain the node value for a path or a default value if missing.""" + + def flags(path, default=''): + """Return the flags value for a path or a default value if missing.""" + + def copy(): + """Return a copy of this manifest.""" + + def items(): + """Returns an iterable of (path, node) for items in this manifest.""" + + def iteritems(): + """Identical to items().""" + + def iterentries(): + """Returns an iterable of (path, node, flags) for this manifest. + + Similar to ``iteritems()`` except items are a 3-tuple and include + flags. + """ + + def text(): + """Obtain the raw data representation for this manifest. + + Result is used to create a manifest revision. + """ + + def fastdelta(base, changes): + """Obtain a delta between this manifest and another given changes. + + ``base`` in the raw data representation for another manifest. + + ``changes`` is an iterable of ``(path, to_delete)``. + + Returns a 2-tuple containing ``bytearray(self.text())`` and the + delta between ``base`` and this manifest. + """ + +class imanifestrevisionbase(interfaceutil.Interface): + """Base interface representing a single revision of a manifest. + + Should not be used as a primary interface: should always be inherited + as part of a larger interface. + """ + + def new(): + """Obtain a new manifest instance. + + Returns an object conforming to the ``imanifestrevisionwritable`` + interface. The instance will be associated with the same + ``imanifestlog`` collection as this instance. + """ + + def copy(): + """Obtain a copy of this manifest instance. + + Returns an object conforming to the ``imanifestrevisionwritable`` + interface. The instance will be associated with the same + ``imanifestlog`` collection as this instance. + """ + + def read(): + """Obtain the parsed manifest data structure. + + The returned object conforms to the ``imanifestdict`` interface. + """ + +class imanifestrevisionstored(imanifestrevisionbase): + """Interface representing a manifest revision committed to storage.""" + + def node(): + """The binary node for this manifest.""" + + parents = interfaceutil.Attribute( + """List of binary nodes that are parents for this manifest revision.""" + ) + + def readdelta(shallow=False): + """Obtain the manifest data structure representing changes from parent. + + This manifest is compared to its 1st parent. A new manifest representing + those differences is constructed. + + The returned object conforms to the ``imanifestdict`` interface. + """ + + def readfast(shallow=False): + """Calls either ``read()`` or ``readdelta()``. + + The faster of the two options is called. + """ + + def find(key): + """Calls self.read().find(key)``. + + Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``. 
+ """ + +class imanifestrevisionwritable(imanifestrevisionbase): + """Interface representing a manifest revision that can be committed.""" + + def write(transaction, linkrev, p1node, p2node, added, removed): + """Add this revision to storage. + + Takes a transaction object, the changeset revision number it will + be associated with, its parent nodes, and lists of added and + removed paths. + + Returns the binary node of the created revision. + """ + +class imanifestlog(interfaceutil.Interface): + """Interface representing a collection of manifest snapshots.""" + + def __getitem__(node): + """Obtain a manifest instance for a given binary node. + + Equivalent to calling ``self.get('', node)``. + + The returned object conforms to the ``imanifestrevisionstored`` + interface. + """ + + def get(dir, node, verify=True): + """Retrieve the manifest instance for a given directory and binary node. + + ``node`` always refers to the node of the root manifest (which will be + the only manifest if flat manifests are being used). + + If ``dir`` is the empty string, the root manifest is returned. Otherwise + the manifest for the specified directory will be returned (requires + tree manifests). + + If ``verify`` is True, ``LookupError`` is raised if the node is not + known. + + The returned object conforms to the ``imanifestrevisionstored`` + interface. + """ + + def clearcaches(): + """Clear caches associated with this collection.""" + + def rev(node): + """Obtain the revision number for a binary node. + + Raises ``error.LookupError`` if the node is not known. + """ + + def addgroup(deltas, linkmapper, transaction): + """Process a series of deltas for storage. + + ``deltas`` is an iterable of 7-tuples of + (node, p1, p2, linknode, deltabase, delta, flags) defining revisions + to add. + + The ``delta`` field contains ``mpatch`` data to apply to a base + revision, identified by ``deltabase``. The base node can be + ``nullid``, in which case the header from the delta can be ignored + and the delta used as the fulltext. + + Returns a list of nodes that were processed. A node will be in the list + even if it existed in the store previously. + """ + class completelocalrepository(interfaceutil.Interface): """Monolithic interface for local repositories. @@ -757,7 +1059,10 @@ class completelocalrepository(interfaceu """A handle on the changelog revlog.""") manifestlog = interfaceutil.Attribute( - """A handle on the root manifest revlog.""") + """An instance conforming to the ``imanifestlog`` interface. + + Provides access to manifests for the repository. + """) dirstate = interfaceutil.Attribute( """Working directory state.""") @@ -863,7 +1168,10 @@ class completelocalrepository(interfaceu """Calls self.vfs.reljoin(self.root, f, *insidef)""" def file(f): - """Obtain a filelog for a tracked path.""" + """Obtain a filelog for a tracked path. + + The returned type conforms to the ``ifilestorage`` interface. 
+ """ def setparents(p1, p2): """Set the parent nodes of the working directory.""" diff --git a/mercurial/repoview.py b/mercurial/repoview.py --- a/mercurial/repoview.py +++ b/mercurial/repoview.py @@ -77,8 +77,7 @@ def computehidden(repo, visibilityexcept if visibilityexceptions: hidden -= visibilityexceptions pfunc = repo.changelog.parentrevs - mutablephases = (phases.draft, phases.secret) - mutable = repo._phasecache.getrevset(repo, mutablephases) + mutable = repo._phasecache.getrevset(repo, phases.mutablephases) visible = mutable - hidden _revealancestors(pfunc, hidden, visible) @@ -92,13 +91,8 @@ def computeunserved(repo, visibilityexce # fast path in simple case to avoid impact of non optimised code hiddens = filterrevs(repo, 'visible') if phases.hassecret(repo): - cl = repo.changelog - secret = phases.secret - getphase = repo._phasecache.phase - first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret]) - revs = cl.revs(start=first) - secrets = set(r for r in revs if getphase(repo, r) >= secret) - return frozenset(hiddens | secrets) + secrets = repo._phasecache.getrevset(repo, phases.remotehiddenphases) + return frozenset(hiddens | frozenset(secrets)) else: return hiddens diff --git a/mercurial/revlog.py b/mercurial/revlog.py --- a/mercurial/revlog.py +++ b/mercurial/revlog.py @@ -196,8 +196,66 @@ def hash(text, p1, p2): s.update(text) return s.digest() +class _testrevlog(object): + """minimalist fake revlog to use in doctests""" + + def __init__(self, data, density=0.5, mingap=0): + """data is an list of revision payload boundaries""" + self._data = data + self._srdensitythreshold = density + self._srmingapsize = mingap + + def start(self, rev): + if rev == 0: + return 0 + return self._data[rev - 1] + + def end(self, rev): + return self._data[rev] + + def length(self, rev): + return self.end(rev) - self.start(rev) + + def __len__(self): + return len(self._data) + def _trimchunk(revlog, revs, startidx, endidx=None): """returns revs[startidx:endidx] without empty trailing revs + + Doctest Setup + >>> revlog = _testrevlog([ + ... 5, #0 + ... 10, #1 + ... 12, #2 + ... 12, #3 (empty) + ... 17, #4 + ... 21, #5 + ... 21, #6 (empty) + ... ]) + + Contiguous cases: + >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0) + [0, 1, 2, 3, 4, 5] + >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5) + [0, 1, 2, 3, 4] + >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4) + [0, 1, 2] + >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4) + [2] + >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3) + [3, 4, 5] + >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5) + [3, 4] + + Discontiguous cases: + >>> _trimchunk(revlog, [1, 3, 5, 6], 0) + [1, 3, 5] + >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2) + [1] + >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3) + [3, 5] + >>> _trimchunk(revlog, [1, 3, 5, 6], 1) + [3, 5] """ length = revlog.length @@ -210,11 +268,231 @@ def _trimchunk(revlog, revs, startidx, e return revs[startidx:endidx] -def _slicechunk(revlog, revs): +def _segmentspan(revlog, revs): + """Get the byte span of a segment of revisions + + revs is a sorted array of revision numbers + + >>> revlog = _testrevlog([ + ... 5, #0 + ... 10, #1 + ... 12, #2 + ... 12, #3 (empty) + ... 17, #4 + ... 
]) + + >>> _segmentspan(revlog, [0, 1, 2, 3, 4]) + 17 + >>> _segmentspan(revlog, [0, 4]) + 17 + >>> _segmentspan(revlog, [3, 4]) + 5 + >>> _segmentspan(revlog, [1, 2, 3,]) + 7 + >>> _segmentspan(revlog, [1, 3]) + 7 + """ + if not revs: + return 0 + return revlog.end(revs[-1]) - revlog.start(revs[0]) + +def _slicechunk(revlog, revs, deltainfo=None, targetsize=None): """slice revs to reduce the amount of unrelated data to be read from disk. ``revs`` is sliced into groups that should be read in one time. Assume that revs are sorted. + + The initial chunk is sliced until the overall density (payload/chunks-span + ratio) is above `revlog._srdensitythreshold`. No gap smaller than + `revlog._srmingapsize` is skipped. + + If `targetsize` is set, no chunk larger than `targetsize` will be yielded. + For consistency with other slicing choices, this limit won't go lower than + `revlog._srmingapsize`. + + If individual revision chunks are larger than this limit, they will still + be yielded individually. + + >>> revlog = _testrevlog([ + ... 5, #00 (5) + ... 10, #01 (5) + ... 12, #02 (2) + ... 12, #03 (empty) + ... 27, #04 (15) + ... 31, #05 (4) + ... 31, #06 (empty) + ... 42, #07 (11) + ... 47, #08 (5) + ... 47, #09 (empty) + ... 48, #10 (1) + ... 51, #11 (3) + ... 74, #12 (23) + ... 85, #13 (11) + ... 86, #14 (1) + ... 91, #15 (5) + ... ]) + + >>> list(_slicechunk(revlog, list(range(16)))) + [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]] + >>> list(_slicechunk(revlog, [0, 15])) + [[0], [15]] + >>> list(_slicechunk(revlog, [0, 11, 15])) + [[0], [11], [15]] + >>> list(_slicechunk(revlog, [0, 11, 13, 15])) + [[0], [11, 13, 15]] + >>> list(_slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14])) + [[1, 2], [5, 8, 10, 11], [14]] + + Slicing with a maximum chunk size + >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=15)) + [[0], [11], [13], [15]] + >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=20)) + [[0], [11], [13, 15]] + """ + if targetsize is not None: + targetsize = max(targetsize, revlog._srmingapsize) + # targetsize should not be specified when evaluating delta candidates: + # * targetsize is used to ensure we stay within specification when reading, + # * deltainfo is used to pick a good delta chain when writing. + if not (deltainfo is None or targetsize is None): + msg = 'cannot use `targetsize` with a `deltainfo`' + raise error.ProgrammingError(msg) + for chunk in _slicechunktodensity(revlog, revs, + deltainfo, + revlog._srdensitythreshold, + revlog._srmingapsize): + for subchunk in _slicechunktosize(revlog, chunk, targetsize): + yield subchunk + +def _slicechunktosize(revlog, revs, targetsize=None): + """slice revs to match the target size + + This is intended to be used on chunks that density slicing selected, but + that are still too large compared to the read guarantee of revlog. This + might happen when the "minimal gap size" interrupted the slicing or when + chains are built in a way that creates large blocks next to each other. + + >>> revlog = _testrevlog([ + ... 3, #0 (3) + ... 5, #1 (2) + ... 6, #2 (1) + ... 8, #3 (2) + ... 8, #4 (empty) + ... 11, #5 (3) + ... 12, #6 (1) + ... 13, #7 (1) + ... 14, #8 (1) + ...
]) + + Cases where chunk is already small enough + >>> list(_slicechunktosize(revlog, [0], 3)) + [[0]] + >>> list(_slicechunktosize(revlog, [6, 7], 3)) + [[6, 7]] + >>> list(_slicechunktosize(revlog, [0], None)) + [[0]] + >>> list(_slicechunktosize(revlog, [6, 7], None)) + [[6, 7]] + + cases where we need actual slicing + >>> list(_slicechunktosize(revlog, [0, 1], 3)) + [[0], [1]] + >>> list(_slicechunktosize(revlog, [1, 3], 3)) + [[1], [3]] + >>> list(_slicechunktosize(revlog, [1, 2, 3], 3)) + [[1, 2], [3]] + >>> list(_slicechunktosize(revlog, [3, 5], 3)) + [[3], [5]] + >>> list(_slicechunktosize(revlog, [3, 4, 5], 3)) + [[3], [5]] + >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3)) + [[5], [6, 7, 8]] + >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3)) + [[0], [1, 2], [3], [5], [6, 7, 8]] + + Case with too large individual chunk (must return valid chunk) + >>> list(_slicechunktosize(revlog, [0, 1], 2)) + [[0], [1]] + >>> list(_slicechunktosize(revlog, [1, 3], 1)) + [[1], [3]] + >>> list(_slicechunktosize(revlog, [3, 4, 5], 2)) + [[3], [5]] + """ + assert targetsize is None or 0 <= targetsize + if targetsize is None or _segmentspan(revlog, revs) <= targetsize: + yield revs + return + + startrevidx = 0 + startdata = revlog.start(revs[0]) + endrevidx = 0 + iterrevs = enumerate(revs) + next(iterrevs) # skip first rev. + for idx, r in iterrevs: + span = revlog.end(r) - startdata + if span <= targetsize: + endrevidx = idx + else: + chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1) + if chunk: + yield chunk + startrevidx = idx + startdata = revlog.start(r) + endrevidx = idx + yield _trimchunk(revlog, revs, startrevidx) + +def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5, + mingapsize=0): + """slice revs to reduce the amount of unrelated data to be read from disk. + + ``revs`` is sliced into groups that should be read in one time. + Assume that revs are sorted. + + ``deltainfo`` is a _deltainfo instance of a revision that we would append + to the top of the revlog. + + The initial chunk is sliced until the overall density (payload/chunks-span + ratio) is above `targetdensity`. No gap smaller than `mingapsize` is + skipped. + + >>> revlog = _testrevlog([ + ... 5, #00 (5) + ... 10, #01 (5) + ... 12, #02 (2) + ... 12, #03 (empty) + ... 27, #04 (15) + ... 31, #05 (4) + ... 31, #06 (empty) + ... 42, #07 (11) + ... 47, #08 (5) + ... 47, #09 (empty) + ... 48, #10 (1) + ... 51, #11 (3) + ... 74, #12 (23) + ... 85, #13 (11) + ... 86, #14 (1) + ... 91, #15 (5) + ... ]) + + >>> list(_slicechunktodensity(revlog, list(range(16)))) + [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]] + >>> list(_slicechunktodensity(revlog, [0, 15])) + [[0], [15]] + >>> list(_slicechunktodensity(revlog, [0, 11, 15])) + [[0], [11], [15]] + >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15])) + [[0], [11, 13, 15]] + >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14])) + [[1, 2], [5, 8, 10, 11], [14]] + >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14], + ... mingapsize=20)) + [[1, 2, 3, 5, 8, 10, 11], [14]] + >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14], + ... targetdensity=0.95)) + [[1, 2], [5], [8, 10, 11], [14]] + >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14], + ... 
targetdensity=0.95, mingapsize=12)) + [[1, 2], [5, 8, 10, 11], [14]] """ start = revlog.start length = revlog.length @@ -223,24 +501,46 @@ def _slicechunk(revlog, revs): yield revs return - startbyte = start(revs[0]) - endbyte = start(revs[-1]) + length(revs[-1]) - readdata = deltachainspan = endbyte - startbyte - - chainpayload = sum(length(r) for r in revs) + nextrev = len(revlog) + nextoffset = revlog.end(nextrev - 1) + + if deltainfo is None: + deltachainspan = _segmentspan(revlog, revs) + chainpayload = sum(length(r) for r in revs) + else: + deltachainspan = deltainfo.distance + chainpayload = deltainfo.compresseddeltalen + + if deltachainspan < mingapsize: + yield revs + return + + readdata = deltachainspan if deltachainspan: density = chainpayload / float(deltachainspan) else: density = 1.0 + if density >= targetdensity: + yield revs + return + + if deltainfo is not None: + revs = list(revs) + revs.append(nextrev) + # Store the gaps in a heap to have them sorted by decreasing size gapsheap = [] heapq.heapify(gapsheap) prevend = None for i, rev in enumerate(revs): - revstart = start(rev) - revlen = length(rev) + if rev < nextrev: + revstart = start(rev) + revlen = length(rev) + else: + revstart = nextoffset + revlen = deltainfo.deltalen # Skip empty revisions to form larger holes if revlen == 0: @@ -249,7 +549,7 @@ def _slicechunk(revlog, revs): if prevend is not None: gapsize = revstart - prevend # only consider holes that are large enough - if gapsize > revlog._srmingapsize: + if gapsize > mingapsize: heapq.heappush(gapsheap, (-gapsize, i)) prevend = revstart + revlen @@ -257,7 +557,7 @@ def _slicechunk(revlog, revs): # Collect the indices of the largest holes until the density is acceptable indicesheap = [] heapq.heapify(indicesheap) - while gapsheap and density < revlog._srdensitythreshold: + while gapsheap and density < targetdensity: oppgapsize, gapidx = heapq.heappop(gapsheap) heapq.heappush(indicesheap, gapidx) @@ -305,6 +605,7 @@ class _deltacomputer(object): grouped by level of easiness. """ revlog = self.revlog + gdelta = revlog._generaldelta curr = len(revlog) prev = curr - 1 p1r, p2r = revlog.rev(p1), revlog.rev(p2) @@ -316,27 +617,35 @@ class _deltacomputer(object): # changegroup data into a generaldelta repo. The only time it # isn't true is if this is the first revision in a delta chain # or if ``format.generaldelta=true`` disabled ``lazydeltabase``. - if cachedelta and revlog._generaldelta and revlog._lazydeltabase: + if cachedelta and gdelta and revlog._lazydeltabase: # Assume what we received from the server is a good choice # build delta will reuse the cache yield (cachedelta[0],) tested.add(cachedelta[0]) - if revlog._generaldelta: + if gdelta: # exclude already lazy tested base if any parents = [p for p in (p1r, p2r) if p != nullrev and p not in tested] - if parents and not revlog._aggressivemergedeltas: - # Pick whichever parent is closer to us (to minimize the - # chance of having to build a fulltext). 
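A condensed restatement of the delta-base candidate ordering that the hunk above and below implements; this is a sketch, not the actual _getcandidaterevs:

    def candidategroups(cachedbase, p1r, p2r, prev, bothparents, nullrev=-1):
        # Yield groups of candidate delta bases, cheapest hope first.
        tested = set()
        if cachedbase is not None:
            yield (cachedbase,)      # reuse what the server sent
            tested.add(cachedbase)
        parents = [p for p in (p1r, p2r)
                   if p != nullrev and p not in tested]
        if not bothparents and len(parents) == 2:
            parents.sort()
            yield (parents[1],)      # closest parent (max rev) first
            yield (parents[0],)      # the other one, only if needed
        elif parents:
            yield tuple(parents)     # test both, keep the best candidate
        tested.update(parents)
        if prev not in tested:
            yield (prev,)            # last resort before a fulltext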
- parents = [max(parents)] - tested.update(parents) - yield parents + + if not revlog._deltabothparents and len(parents) == 2: + parents.sort() + # To minimize the chance of having to build a fulltext, + # pick first whichever parent is closest to us (max rev) + yield (parents[1],) + # then the other one (min rev) if the first did not fit + yield (parents[0],) + tested.update(parents) + elif len(parents) > 0: + # Test all parents (1 or 2), and keep the best candidate + yield parents + tested.update(parents) if prev not in tested: # other approach failed try against prev to hopefully save us a # fulltext. yield (prev,) + tested.add(prev) def buildtext(self, revinfo, fh): """Builds a fulltext version of a revision @@ -441,7 +750,7 @@ class _deltacomputer(object): if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS: continue candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh) - if revlog._isgooddeltainfo(candidatedelta, revinfo.textlen): + if revlog._isgooddeltainfo(candidatedelta, revinfo): nominateddeltas.append(candidatedelta) if nominateddeltas: deltainfo = min(nominateddeltas, key=lambda x: x.deltalen) @@ -606,7 +915,7 @@ class revlog(object): # How much data to read and cache into the raw revlog data cache. self._chunkcachesize = 65536 self._maxchainlen = None - self._aggressivemergedeltas = False + self._deltabothparents = True self.index = [] # Mapping of partial identifiers to full nodes. self._pcache = {} @@ -616,7 +925,8 @@ class revlog(object): self._compengine = 'zlib' self._maxdeltachainspan = -1 self._withsparseread = False - self._srdensitythreshold = 0.25 + self._sparserevlog = False + self._srdensitythreshold = 0.50 self._srmingapsize = 262144 mmapindexthreshold = None @@ -635,8 +945,8 @@ class revlog(object): self._chunkcachesize = opts['chunkcachesize'] if 'maxchainlen' in opts: self._maxchainlen = opts['maxchainlen'] - if 'aggressivemergedeltas' in opts: - self._aggressivemergedeltas = opts['aggressivemergedeltas'] + if 'deltabothparents' in opts: + self._deltabothparents = opts['deltabothparents'] self._lazydeltabase = bool(opts.get('lazydeltabase', False)) if 'compengine' in opts: self._compengine = opts['compengine'] @@ -644,7 +954,10 @@ class revlog(object): self._maxdeltachainspan = opts['maxdeltachainspan'] if mmaplargeindex and 'mmapindexthreshold' in opts: mmapindexthreshold = opts['mmapindexthreshold'] - self._withsparseread = bool(opts.get('with-sparse-read', False)) + self._sparserevlog = bool(opts.get('sparse-revlog', False)) + withsparseread = bool(opts.get('with-sparse-read', False)) + # sparse-revlog forces sparse-read + self._withsparseread = self._sparserevlog or withsparseread if 'sparse-read-density-threshold' in opts: self._srdensitythreshold = opts['sparse-read-density-threshold'] if 'sparse-read-min-gap-size' in opts: @@ -868,10 +1181,11 @@ class revlog(object): return base index = self.index - base = index[rev][3] - while base != rev: - rev = base - base = index[rev][3] + iterrev = rev + base = index[iterrev][3] + while base != iterrev: + iterrev = base + base = index[iterrev][3] self._chainbasecache[rev] = base return base @@ -1365,31 +1679,46 @@ class revlog(object): c.append(self.node(r)) return c - def descendant(self, start, end): - if start == nullrev: - return True - for i in self.descendants([start]): - if i == end: - return True - elif i > end: - break - return False - def commonancestorsheads(self, a, b): """calculate all the heads of the common ancestors of nodes a and b""" a, b = self.rev(a), self.rev(b) + ancs = 
self._commonancestorsheads(a, b) + return pycompat.maplist(self.node, ancs) + + def _commonancestorsheads(self, *revs): + """calculate all the heads of the common ancestors of revs""" try: - ancs = self.index.commonancestorsheads(a, b) + ancs = self.index.commonancestorsheads(*revs) except (AttributeError, OverflowError): # C implementation failed - ancs = ancestor.commonancestorsheads(self.parentrevs, a, b) - return pycompat.maplist(self.node, ancs) + ancs = ancestor.commonancestorsheads(self.parentrevs, *revs) + return ancs def isancestor(self, a, b): """return True if node a is an ancestor of node b + A revision is considered an ancestor of itself.""" + a, b = self.rev(a), self.rev(b) + return self.isancestorrev(a, b) + + def descendant(self, a, b): + msg = (b'revlog.descendant is deprecated, use revlog.isancestorrev') + self._repo.ui.deprecwarn(msg, b'4.7') + return self.isancestorrev(a, b) + + def isancestorrev(self, a, b): + """return True if revision a is an ancestor of revision b + + A revision is considered an ancestor of itself. + The implementation of this is trivial but the use of commonancestorsheads is not.""" - return a in self.commonancestorsheads(a, b) + if a == nullrev: + return True + elif a == b: + return True + elif a > b: + return False + return a in self._commonancestorsheads(a, b) def ancestor(self, a, b): """calculate the "best" common ancestor of nodes a and b""" @@ -1502,42 +1831,51 @@ class revlog(object): def shortest(self, node, minlength=1): """Find the shortest unambiguous prefix that matches node.""" - def isvalid(test): + def isvalid(prefix): try: - if self._partialmatch(test) is None: - return False - - try: - i = int(test) - # if we are a pure int, then starting with zero will not be - # confused as a rev; or, obviously, if the int is larger - # than the value of the tip rev - if test[0] == '0' or i > len(self): - return True - return False - except ValueError: - return True + node = self._partialmatch(prefix) except error.RevlogError: return False except error.WdirUnsupported: # single 'ff...' 
match return True + if node is None: + raise LookupError(node, self.indexfile, _('no node')) + return True + + def maybewdir(prefix): + return all(c == 'f' for c in prefix) hexnode = hex(node) - shortest = hexnode - startlength = max(6, minlength) - length = startlength - while True: - test = hexnode[:length] - if isvalid(test): - shortest = test - if length == minlength or length > startlength: - return shortest - length -= 1 - else: - length += 1 - if len(shortest) <= length: - return shortest + + def disambiguate(hexnode, minlength): + """Disambiguate against wdirid.""" + for length in range(minlength, 41): + prefix = hexnode[:length] + if not maybewdir(prefix): + return prefix + + if not getattr(self, 'filteredrevs', None): + try: + length = max(self.index.shortest(node), minlength) + return disambiguate(hexnode, length) + except RevlogError: + if node != wdirid: + raise LookupError(node, self.indexfile, _('no node')) + except AttributeError: + # Fall through to pure code + pass + + if node == wdirid: + for length in range(minlength, 41): + prefix = hexnode[:length] + if isvalid(prefix): + return prefix + + for length in range(minlength, 41): + prefix = hexnode[:length] + if isvalid(prefix): + return disambiguate(hexnode, length) def cmp(self, node, text): """compare text with a given file revision @@ -1654,7 +1992,7 @@ class revlog(object): """ return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1]) - def _chunks(self, revs, df=None): + def _chunks(self, revs, df=None, targetsize=None): """Obtain decompressed chunks for the specified revisions. Accepts an iterable of numeric revisions that are assumed to be in @@ -1681,7 +2019,7 @@ class revlog(object): if not self._withsparseread: slicedchunks = (revs,) else: - slicedchunks = _slicechunk(self, revs) + slicedchunks = _slicechunk(self, revs, targetsize=targetsize) for revschunk in slicedchunks: firstrev = revschunk[0] @@ -1784,7 +2122,12 @@ class revlog(object): # drop cache to save memory self._cache = None - bins = self._chunks(chain, df=_df) + targetsize = None + rawsize = self.index[rev][2] + if 0 <= rawsize: + targetsize = 4 * rawsize + + bins = self._chunks(chain, df=_df, targetsize=targetsize) if rawtext is None: rawtext = bytes(bins[0]) bins = bins[1:] @@ -2076,26 +2419,49 @@ class revlog(object): return compressor.decompress(data) - def _isgooddeltainfo(self, d, textlen): + def _isgooddeltainfo(self, deltainfo, revinfo): """Returns True if the given delta is good. Good means that it is within the disk span, disk size, and chain length bounds that we know to be performant.""" - if d is None: + if deltainfo is None: return False - # - 'd.distance' is the distance from the base revision -- bounding it - # limits the amount of I/O we need to do. - # - 'd.compresseddeltalen' is the sum of the total size of deltas we - # need to apply -- bounding it limits the amount of CPU we consume. - + # - 'deltainfo.distance' is the distance from the base revision -- + # bounding it limits the amount of I/O we need to do. + # - 'deltainfo.compresseddeltalen' is the sum of the total size of + # deltas we need to apply -- bounding it limits the amount of CPU + # we consume. 
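The acceptance test assembled in the following hunk boils down to four bounds; a condensed sketch (it omits the sparse-revlog minimum-gap relaxation added below, and the helper name is hypothetical):

    def gooddelta(distance, deltalen, compresseddeltalen, chainlen,
                  textlen, maxspan=-1, maxchain=None):
        # A delta is kept only when it stays within I/O and CPU bounds.
        maxdist = maxspan if maxspan else distance
        maxdist = max(maxdist, 4 * textlen)
        return (distance <= maxdist and
                deltalen <= textlen and
                compresseddeltalen <= 2 * textlen and
                (not maxchain or chainlen <= maxchain))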
+ + if self._sparserevlog: + # As sparse-read will be used, we can consider that the distance, + # instead of being the span of the whole chunk, + # is the span of the largest read chunk + base = deltainfo.base + + if base != nullrev: + deltachain = self._deltachain(base)[0] + else: + deltachain = [] + + chunks = _slicechunk(self, deltachain, deltainfo) + distance = max(map(lambda revs:_segmentspan(self, revs), chunks)) + else: + distance = deltainfo.distance + + textlen = revinfo.textlen defaultmax = textlen * 4 maxdist = self._maxdeltachainspan if not maxdist: - maxdist = d.distance # ensure the conditional pass + maxdist = distance # ensure the conditional passes maxdist = max(maxdist, defaultmax) - if (d.distance > maxdist or d.deltalen > textlen or - d.compresseddeltalen > textlen * 2 or - (self._maxchainlen and d.chainlen > self._maxchainlen)): + if self._sparserevlog and maxdist < self._srmingapsize: + # In multiple places, we are ignoring irrelevant data ranges below a + # certain size. We also apply this tradeoff here and relax the span + # constraint for small enough content. + maxdist = self._srmingapsize + if (distance > maxdist or deltainfo.deltalen > textlen or + deltainfo.compresseddeltalen > textlen * 2 or + (self._maxchainlen and deltainfo.chainlen > self._maxchainlen)): return False return True @@ -2477,7 +2843,7 @@ class revlog(object): DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'} def clone(self, tr, destrevlog, addrevisioncb=None, - deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None): + deltareuse=DELTAREUSESAMEREVS, deltabothparents=None): """Copy this revlog to another, possibly with format changes. The destination revlog will contain the same revisions and nodes. @@ -2511,7 +2877,7 @@ class revlog(object): deltas will be recomputed if the delta's parent isn't a parent of the revision. - In addition to the delta policy, the ``aggressivemergedeltas`` argument + In addition to the delta policy, the ``deltabothparents`` argument controls whether to compute deltas against both parents for merges. By default, the current default is used. @@ -2528,7 +2894,7 @@ # lazydeltabase controls whether to reuse a cached delta, if possible. oldlazydeltabase = destrevlog._lazydeltabase - oldamd = destrevlog._aggressivemergedeltas + oldamd = destrevlog._deltabothparents try: if deltareuse == self.DELTAREUSEALWAYS: destrevlog._lazydeltabase = True elif deltareuse == self.DELTAREUSESAMEREVS: destrevlog._lazydeltabase = False - destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd + destrevlog._deltabothparents = deltabothparents or oldamd populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS, self.DELTAREUSESAMEREVS) @@ -2591,4 +2957,4 @@ class revlog(object): addrevisioncb(self, rev, node) finally: destrevlog._lazydeltabase = oldlazydeltabase - destrevlog._aggressivemergedeltas = oldamd + destrevlog._deltabothparents = oldamd diff --git a/mercurial/revset.py b/mercurial/revset.py --- a/mercurial/revset.py +++ b/mercurial/revset.py @@ -13,6 +13,7 @@ from .i18n import _ from .
import ( dagop, destutil, + diffutil, encoding, error, hbisect, @@ -111,7 +112,7 @@ def _getrevsource(repo, r): return None def _sortedb(xs): - return sorted(util.rapply(pycompat.maybebytestr, xs)) + return sorted(pycompat.rapply(pycompat.maybebytestr, xs)) # operator methods @@ -203,6 +204,8 @@ def _orsetlist(repo, subset, xs, order): def orset(repo, subset, x, order): xs = getlist(x) + if not xs: + return baseset() if order == followorder: # slow path to take the subset order return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder) @@ -309,21 +312,17 @@ def ancestor(repo, subset, x): Will return empty list when passed no args. Greatest common ancestor of a single changeset is that changeset. """ - # i18n: "ancestor" is a keyword - l = getlist(x) - rl = fullreposet(repo) - anc = None + reviter = iter(orset(repo, fullreposet(repo), x, order=anyorder)) + try: + anc = repo[next(reviter)] + except StopIteration: + return baseset() + for r in reviter: + anc = anc.ancestor(repo[r]) - # (getset(repo, rl, i) for i in l) generates a list of lists - for revs in (getset(repo, rl, i) for i in l): - for r in revs: - if anc is None: - anc = repo[r] - else: - anc = anc.ancestor(repo[r]) - - if anc is not None and anc.rev() in subset: - return baseset([anc.rev()]) + r = scmutil.intrev(anc) + if r in subset: + return baseset([r]) return baseset() def _ancestors(repo, subset, x, followfirst=False, startdepth=None, @@ -609,6 +608,38 @@ def closed(repo, subset, x): return subset.filter(lambda r: repo[r].closesbranch(), condrepr='<branch closed>') +# for internal use +@predicate('_commonancestorheads(set)', safe=True) +def _commonancestorheads(repo, subset, x): + # This is an internal method for quickly calculating "heads(::x and + # ::y)" + + # These greatest common ancestors are the same ones that the consensus bid + # merge will find. + h = heads(repo, fullreposet(repo), x, anyorder) + + ancs = repo.changelog._commonancestorsheads(*list(h)) + return subset & baseset(ancs) +
+@predicate('commonancestors(set)', safe=True) +def commonancestors(repo, subset, x): + """Returns all common ancestors of the set. + + This method is for calculating "::x and ::y" (i.e. all the ancestors that + are common to both x and y) in an easy and optimized way. We can't quite + use "::head()" because that revset returns "::x + ::y + ..." for each head + in the repo (whereas we want "::x *and* ::y"). + + """ + # only wants the heads of the set passed in + h = heads(repo, fullreposet(repo), x, anyorder) + if not h: + return baseset() + for r in h: + subset &= dagop.revancestors(repo, baseset([r])) + + return subset + @predicate('contains(pattern)', weight=100) def contains(repo, subset, x): """The revision's manifest contains a file matching pattern (but might not @@ -1129,11 +1160,14 @@ def head(repo, subset, x): hs.update(cl.rev(h) for h in ls) return subset & baseset(hs) -@predicate('heads(set)', safe=True) -def heads(repo, subset, x): +@predicate('heads(set)', safe=True, takeorder=True) +def heads(repo, subset, x, order): """Members of set with no children in set.
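+
+    For example, ``heads(all())`` evaluates to every changeset that has no
+    children anywhere in the repository (an illustrative reading of the
+    definition above, not text from the original patch).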
""" - s = getset(repo, subset, x) + # argument set should never define order + if order == defineorder: + order = followorder + s = getset(repo, subset, x, order=order) ps = parents(repo, subset, x) return s - ps @@ -1333,9 +1367,11 @@ def node_(repo, subset, x): else: rn = None try: - pm = repo.changelog._partialmatch(n) + pm = scmutil.resolvehexnodeidprefix(repo, n) if pm is not None: rn = repo.changelog.rev(pm) + except LookupError: + pass except error.WdirUnsupported: rn = node.wdirrev @@ -1344,6 +1380,14 @@ def node_(repo, subset, x): result = baseset([rn]) return result & subset +@predicate('none()', safe=True) +def none(repo, subset, x): + """No changesets. + """ + # i18n: "none" is a keyword + getargs(x, 0, 0, _("none takes no arguments")) + return baseset() + @predicate('obsolete()', safe=True) def obsolete(repo, subset, x): """Mutable changeset with a newer version.""" @@ -1792,7 +1836,8 @@ def matching(repo, subset, x): 'phase': lambda r: repo[r].phase(), 'substate': lambda r: repo[r].substate, 'summary': lambda r: repo[r].description().splitlines()[0], - 'diff': lambda r: list(repo[r].diff(git=True),) + 'diff': lambda r: list(repo[r].diff( + opts=diffutil.diffallopts(repo.ui, {'git': True}))), } for info in fields: getfield = _funcs.get(info, None) diff --git a/mercurial/revsetlang.py b/mercurial/revsetlang.py --- a/mercurial/revsetlang.py +++ b/mercurial/revsetlang.py @@ -459,6 +459,12 @@ def _optimize(x): f = getsymbol(x[1]) wa, ta = _optimize(x[2]) w = getattr(symbols.get(f), '_weight', 1) + m = _match('commonancestors(_)', ta) + + # Optimize heads(commonancestors(_)) because we have a fast version + if f == 'heads' and m: + return w + wa, _build('_commonancestorheads(_)', m[1]) + return w + wa, (op, x[1], ta) raise ValueError('invalid operator %r' % op) diff --git a/mercurial/scmutil.py b/mercurial/scmutil.py --- a/mercurial/scmutil.py +++ b/mercurial/scmutil.py @@ -104,8 +104,9 @@ class status(tuple): return self[6] def __repr__(self, *args, **kwargs): - return (('') % self) + return ((r'') % + tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)) def itersubrepos(ctx1, ctx2): """find subrepos in ctx1 or ctx2""" @@ -200,7 +201,7 @@ def callcatch(ui, func): elif not msg: ui.warn(_(" empty string\n")) else: - ui.warn("\n%r\n" % stringutil.ellipsis(msg)) + ui.warn("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg))) except error.CensoredNodeError as inst: ui.warn(_("abort: file censored %s!\n") % inst) except error.RevlogError as inst: @@ -232,7 +233,7 @@ def callcatch(ui, func): except (AttributeError, IndexError): # it might be anything, for example a string reason = inst.reason - if isinstance(reason, unicode): + if isinstance(reason, pycompat.unicode): # SSLError of Python 2.7.9 contains a unicode reason = encoding.unitolocal(reason) ui.warn(_("abort: error: %s\n") % reason) @@ -286,7 +287,8 @@ def checknewlabel(repo, lbl, kind): def checkfilename(f): '''Check that the filename f is an acceptable filename for a tracked file''' if '\r' in f or '\n' in f: - raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f) + raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") + % pycompat.bytestr(f)) def checkportable(ui, f): '''Check if filename f is portable and warn or abort depending on config''' @@ -448,7 +450,32 @@ def shortesthexnodeidprefix(repo, node, # _partialmatch() of filtered changelog could take O(len(repo)) time, # which would be unacceptably slow. 
so we look for hash collision in # unfiltered space, which means some hashes may be slightly longer. - return repo.unfiltered().changelog.shortest(node, minlength) + cl = repo.unfiltered().changelog + + def isrev(prefix): + try: + i = int(prefix) + # if we are a pure int, then starting with zero will not be + # confused as a rev; or, obviously, if the int is larger + # than the value of the tip rev + if prefix[0:1] == b'0' or i > len(cl): + return False + return True + except ValueError: + return False + + def disambiguate(prefix): + """Disambiguate against revnums.""" + hexnode = hex(node) + for length in range(len(prefix), len(hexnode) + 1): + prefix = hexnode[:length] + if not isrev(prefix): + return prefix + + try: + return disambiguate(cl.shortest(node, minlength)) + except error.LookupError: + raise error.RepoLookupError() def isrevsymbol(repo, symbol): """Checks if a symbol exists in the repo. @@ -561,11 +588,6 @@ def _pairspec(revspec): tree = revsetlang.parse(revspec) return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall') -def revpairnodes(repo, revs): - repo.ui.deprecwarn("revpairnodes is deprecated, please use revpair", "4.6") - ctx1, ctx2 = revpair(repo, revs) - return ctx1.node(), ctx2.node() - def revpair(repo, revs): if not revs: return repo['.'], repo[None] @@ -757,7 +779,8 @@ class _containsnode(object): def __contains__(self, node): return self._revcontains(self._torev(node)) -def cleanupnodes(repo, replacements, operation, moves=None, metadata=None): +def cleanupnodes(repo, replacements, operation, moves=None, metadata=None, + fixphase=False, targetphase=None): """do common cleanups when old nodes are replaced by new nodes That includes writing obsmarkers or stripping nodes, and moving bookmarks. @@ -773,6 +796,7 @@ def cleanupnodes(repo, replacements, ope metadata is a dictionary containing metadata to be stored in obsmarker if obsolescence is enabled. """ + assert fixphase or targetphase is None if not replacements and not moves: return @@ -803,18 +827,45 @@ def cleanupnodes(repo, replacements, ope newnode = newnodes[0] moves[oldnode] = newnode + allnewnodes = [n for ns in replacements.values() for n in ns] + toretract = {} + toadvance = {} + if fixphase: + precursors = {} + for oldnode, newnodes in replacements.items(): + for newnode in newnodes: + precursors.setdefault(newnode, []).append(oldnode) + + allnewnodes.sort(key=lambda n: unfi[n].rev()) + newphases = {} + def phase(ctx): + return newphases.get(ctx.node(), ctx.phase()) + for newnode in allnewnodes: + ctx = unfi[newnode] + parentphase = max(phase(p) for p in ctx.parents()) + if targetphase is None: + oldphase = max(unfi[oldnode].phase() + for oldnode in precursors[newnode]) + newphase = max(oldphase, parentphase) + else: + newphase = max(targetphase, parentphase) + newphases[newnode] = newphase + if newphase > ctx.phase(): + toretract.setdefault(newphase, []).append(newnode) + elif newphase < ctx.phase(): + toadvance.setdefault(newphase, []).append(newnode) + with repo.transaction('cleanup') as tr: # Move bookmarks bmarks = repo._bookmarks bmarkchanges = [] - allnewnodes = [n for ns in replacements.values() for n in ns] for oldnode, newnode in moves.items(): oldbmarks = repo.nodebookmarks(oldnode) if not oldbmarks: continue from .
import bookmarks # avoid import cycle repo.ui.debug('moving bookmarks %r from %s to %s\n' % - (util.rapply(pycompat.maybebytestr, oldbmarks), + (pycompat.rapply(pycompat.maybebytestr, oldbmarks), hex(oldnode), hex(newnode))) # Delete divergent bookmarks being parents of related newnodes deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)', @@ -828,6 +879,11 @@ def cleanupnodes(repo, replacements, ope if bmarkchanges: bmarks.applychanges(repo, tr, bmarkchanges) + for phase, nodes in toretract.items(): + phases.retractboundary(repo, tr, phase, nodes) + for phase, nodes in toadvance.items(): + phases.advanceboundary(repo, tr, phase, nodes) + # Obsolete or strip nodes if obsolete.isenabled(repo, obsolete.createmarkersopt): # If a node is already obsoleted, and we want to obsolete it @@ -1110,21 +1166,32 @@ class filecacheentry(object): entry.refresh() class filecache(object): - '''A property like decorator that tracks files under .hg/ for updates. + """A property like decorator that tracks files under .hg/ for updates. - Records stat info when called in _filecache. + On first access, the files defined as arguments are stat()ed and the + results cached. The decorated function is called. The results are stashed + away in a ``_filecache`` dict on the object whose method is decorated. - On subsequent calls, compares old stat info with new info, and recreates the - object when any of the files changes, updating the new stat info in - _filecache. + On subsequent access, the cached result is returned. + + On external property set operations, stat() calls are performed and the new + value is cached. + + On property delete operations, cached data is removed. - Mercurial either atomic renames or appends for files under .hg, - so to ensure the cache is reliable we need the filesystem to be able - to tell us if a file has been replaced. If it can't, we fallback to - recreating the object on every call (essentially the same behavior as - propertycache). + When using the property API, cached data is always returned, if available: + no stat() is performed to check if the file has changed and if the function + needs to be called to reflect file changes. - ''' + Others can muck about with the state of the ``_filecache`` dict. e.g. they + can populate an entry before the property's getter is called. In this case, + entries in ``_filecache`` will be used during property operations, + if available. If the underlying file changes, it is up to external callers + to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached + method result as well as possibly calling ``del obj._filecache[attr]`` to + remove the ``filecacheentry``. + """ + def __init__(self, *paths): self.paths = paths @@ -1139,7 +1206,8 @@ class filecache(object): def __call__(self, func): self.func = func - self.name = func.__name__.encode('ascii') + self.sname = func.__name__ + self.name = pycompat.sysbytes(self.sname) return self def __get__(self, obj, type=None): @@ -1147,9 +1215,9 @@ class filecache(object): if obj is None: return self # do we need to check if the file changed? 
- if self.name in obj.__dict__: + if self.sname in obj.__dict__: assert self.name in obj._filecache, self.name - return obj.__dict__[self.name] + return obj.__dict__[self.sname] entry = obj._filecache.get(self.name) @@ -1166,7 +1234,7 @@ class filecache(object): obj._filecache[self.name] = entry - obj.__dict__[self.name] = entry.obj + obj.__dict__[self.sname] = entry.obj return entry.obj def __set__(self, obj, value): @@ -1180,13 +1248,13 @@ class filecache(object): ce = obj._filecache[self.name] ce.obj = value # update cached copy - obj.__dict__[self.name] = value # update copy returned by obj.x + obj.__dict__[self.sname] = value # update copy returned by obj.x def __delete__(self, obj): try: - del obj.__dict__[self.name] + del obj.__dict__[self.sname] except KeyError: - raise AttributeError(self.name) + raise AttributeError(self.sname) def extdatasource(repo, source): """Gather a map of rev -> value dict from the specified source @@ -1262,6 +1330,37 @@ def wlocksub(repo, cmd, *args, **kwargs) return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args, **kwargs) +class progress(object): + def __init__(self, ui, topic, unit="", total=None): + self.ui = ui + self.pos = 0 + self.topic = topic + self.unit = unit + self.total = total + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.complete() + + def update(self, pos, item="", total=None): + assert pos is not None + if total: + self.total = total + self.pos = pos + self._print(item) + + def increment(self, step=1, item="", total=None): + self.update(self.pos + step, item, total) + + def complete(self): + self.ui.progress(self.topic, None) + + def _print(self, item): + self.ui.progress(self.topic, self.pos, item, self.unit, + self.total) + def gdinitconfig(ui): """helper function to know if a repo should be created as general delta """ @@ -1434,9 +1533,9 @@ def registersummarycallback(repo, otr, t for instability, revset in instabilitytypes: delta = (newinstabilitycounts[instability] - oldinstabilitycounts[instability]) - if delta > 0: - repo.ui.warn(_('%i new %s changesets\n') % - (delta, instability)) + msg = getinstabilitymessage(delta, instability) + if msg: + repo.ui.warn(msg) if txmatch(_reportnewcssource): @reportsummary @@ -1460,6 +1559,32 @@ def registersummarycallback(repo, otr, t revrange = '%s:%s' % (minrev, maxrev) repo.ui.status(_('new changesets %s\n') % revrange) + @reportsummary + def reportphasechanges(repo, tr): + """Report statistics of phase changes for changesets pre-existing + pull/unbundle. 
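+
+        For example, a pull that turns two pre-existing draft changesets
+        public would produce the status message "2 local changesets
+        published" (an illustrative scenario based on the message below,
+        not text from the original patch).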
+ """ + newrevs = tr.changes.get('revs', xrange(0, 0)) + phasetracking = tr.changes.get('phases', {}) + if not phasetracking: + return + published = [ + rev for rev, (old, new) in phasetracking.iteritems() + if new == phases.public and rev not in newrevs + ] + if not published: + return + repo.ui.status(_('%d local changesets published\n') + % len(published)) + +def getinstabilitymessage(delta, instability): + """function to return the message to show warning about new instabilities + + exists as a separate function so that extension can wrap to show more + information like how to fix instabilities""" + if delta > 0: + return _('%i new %s changesets\n') % (delta, instability) + def nodesummaries(repo, nodes, maxnumnodes=4): if len(nodes) <= maxnumnodes or repo.ui.verbose: return ' '.join(short(h) for h in nodes) @@ -1538,7 +1663,6 @@ def _getrevsfromsymbols(repo, symbols): unficl = unfi.changelog cl = repo.changelog tiprev = len(unficl) - pmatch = unficl._partialmatch allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums') for s in symbols: try: @@ -1554,7 +1678,7 @@ def _getrevsfromsymbols(repo, symbols): pass try: - s = pmatch(s) + s = resolvehexnodeidprefix(unfi, s) except (error.LookupError, error.WdirUnsupported): s = None @@ -1564,3 +1688,12 @@ def _getrevsfromsymbols(repo, symbols): revs.add(rev) return revs + +def bookmarkrevs(repo, mark): + """ + Select revisions reachable by a given bookmark + """ + return repo.revs("ancestors(bookmark(%s)) - " + "ancestors(head() and not bookmark(%s)) - " + "ancestors(bookmark() and not bookmark(%s))", + mark, mark, mark) diff --git a/mercurial/server.py b/mercurial/server.py --- a/mercurial/server.py +++ b/mercurial/server.py @@ -8,7 +8,6 @@ from __future__ import absolute_import import os -import tempfile from .i18n import _ @@ -72,7 +71,7 @@ def runservice(opts, parentfn=None, init if opts['daemon'] and not opts['daemon_postexec']: # Signal child process startup with file removal - lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-') + lockfd, lockpath = pycompat.mkstemp(prefix='hg-service-') os.close(lockfd) try: if not runargs: diff --git a/mercurial/setdiscovery.py b/mercurial/setdiscovery.py --- a/mercurial/setdiscovery.py +++ b/mercurial/setdiscovery.py @@ -197,6 +197,7 @@ def findcommonheads(ui, local, remote, missing = set() full = False + progress = ui.makeprogress(_('searching'), unit=_('queries')) while undecided: if sample: @@ -226,7 +227,7 @@ def findcommonheads(ui, local, remote, sample = samplefunc(dag, undecided, targetsize) roundtrips += 1 - ui.progress(_('searching'), roundtrips, unit=_('queries')) + progress.update(roundtrips) ui.debug("query %i; still undecided: %i, sample size is: %i\n" % (roundtrips, len(undecided), len(sample))) # indices between sample and externalized version must match @@ -251,7 +252,7 @@ def findcommonheads(ui, local, remote, # return any heads in that case, so discard that result.discard(nullrev) elapsed = util.timer() - start - ui.progress(_('searching'), None) + progress.complete() ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed)) msg = ('found %d common and %d unknown server heads,' ' %d roundtrips in %.4fs\n') diff --git a/mercurial/similar.py b/mercurial/similar.py --- a/mercurial/similar.py +++ b/mercurial/similar.py @@ -18,14 +18,14 @@ def _findexactmatches(repo, added, remov Takes a list of new filectxs and a list of removed filectxs, and yields (before, after) tuples of exact matches. 
''' - numfiles = len(added) + len(removed) - # Build table of removed files: {hash(fctx.data()): [fctx, ...]}. # We use hash() to discard fctx.data() from memory. hashes = {} - for i, fctx in enumerate(removed): - repo.ui.progress(_('searching for exact renames'), i, total=numfiles, - unit=_('files')) + progress = repo.ui.makeprogress(_('searching for exact renames'), + total=(len(added) + len(removed)), + unit=_('files')) + for fctx in removed: + progress.increment() h = hash(fctx.data()) if h not in hashes: hashes[h] = [fctx] @@ -33,9 +33,8 @@ def _findexactmatches(repo, added, remov hashes[h].append(fctx) # For each added file, see if it corresponds to a removed file. - for i, fctx in enumerate(added): - repo.ui.progress(_('searching for exact renames'), i + len(removed), - total=numfiles, unit=_('files')) + for fctx in added: + progress.increment() adata = fctx.data() h = hash(adata) for rfctx in hashes.get(h, []): @@ -45,7 +44,7 @@ def _findexactmatches(repo, added, remov break # Done - repo.ui.progress(_('searching for exact renames'), None) + progress.complete() def _ctxdata(fctx): # lazily load text @@ -76,10 +75,10 @@ def _findsimilarmatches(repo, added, rem (before, after, score) tuples of partial matches. ''' copies = {} - for i, r in enumerate(removed): - repo.ui.progress(_('searching for similar files'), i, - total=len(removed), unit=_('files')) - + progress = repo.ui.makeprogress(_('searching for similar files'), + unit=_('files'), total=len(removed)) + for r in removed: + progress.increment() data = None for a in added: bestscore = copies.get(a, (None, threshold))[1] @@ -88,7 +87,7 @@ def _findsimilarmatches(repo, added, rem myscore = _score(a, data) if myscore > bestscore: copies[a] = (r, myscore) - repo.ui.progress(_('searching'), None) + progress.complete() for dest, v in copies.iteritems(): source, bscore = v diff --git a/mercurial/smartset.py b/mercurial/smartset.py --- a/mercurial/smartset.py +++ b/mercurial/smartset.py @@ -13,29 +13,9 @@ from . 
import ( pycompat, util, ) - -def _formatsetrepr(r): - """Format an optional printable representation of a set - - ======== ================================= - type(r) example - ======== ================================= - tuple ('<not %r>', other) - bytes '<branch closed>' - callable lambda: '<branch %r>' % sorted(b) - object other - ======== ================================= - """ - if r is None: - return '' - elif isinstance(r, tuple): - return r[0] % util.rapply(pycompat.maybebytestr, r[1:]) - elif isinstance(r, bytes): - return r - elif callable(r): - return r() - else: - return pycompat.byterepr(r) +from .utils import ( + stringutil, +) def _typename(o): return pycompat.sysbytes(type(o).__name__).lstrip('_') @@ -392,7 +372,7 @@ class baseset(abstractsmartset): @encoding.strmethod def __repr__(self): d = {None: '', False: '-', True: '+'}[self._ascending] - s = _formatsetrepr(self._datarepr) + s = stringutil.buildrepr(self._datarepr) if not s: l = self._list # if _list has been built from a set, it might have a different @@ -514,7 +494,7 @@ class filteredset(abstractsmartset): @encoding.strmethod def __repr__(self): xs = [pycompat.byterepr(self._subset)] - s = _formatsetrepr(self._condrepr) + s = stringutil.buildrepr(self._condrepr) if s: xs.append(s) return '<%s %s>' % (_typename(self), ', '.join(xs)) @@ -1129,17 +1109,3 @@ class fullreposet(_spanset): other.sort(reverse=self.isdescending()) return other - -def prettyformat(revs): - lines = [] - rs = pycompat.byterepr(revs) - p = 0 - while p < len(rs): - q = rs.find('<', p + 1) - if q < 0: - q = len(rs) - l = rs.count('<', 0, p) - rs.count('>', 0, p) - assert l >= 0 - lines.append((l, rs[p:q].rstrip())) - p = q - return '\n'.join(' ' * l + s for l, s in lines) diff --git a/mercurial/sshpeer.py b/mercurial/sshpeer.py --- a/mercurial/sshpeer.py +++ b/mercurial/sshpeer.py @@ -22,6 +22,7 @@ from . import ( ) from .utils import ( procutil, + stringutil, ) def _serverquote(s): @@ -98,6 +99,17 @@ class doublepipe(object): _forwardoutput(self._ui, self._side) return r + def unbufferedread(self, size): + r = self._call('unbufferedread', size) + if size != 0 and not r: + # We've observed a condition that indicates the + # stdout closed unexpectedly. Check stderr one + # more time and snag anything that's there before + # letting anyone know the main part of the pipe + # closed prematurely. + _forwardoutput(self._ui, self._side) + return r + def readline(self): return self._call('readline') @@ -273,7 +285,7 @@ def _performhandshake(ui, stdin, stdout, # Assume version 1 of wire protocol by default. protoname = wireprototypes.SSHV1 - reupgraded = re.compile(b'^upgraded %s (.*)$' % re.escape(token)) + reupgraded = re.compile(b'^upgraded %s (.*)$' % stringutil.reescape(token)) lines = ['', 'dummy'] max_noise = 500 diff --git a/mercurial/sslutil.py b/mercurial/sslutil.py --- a/mercurial/sslutil.py +++ b/mercurial/sslutil.py @@ -618,14 +618,14 @@ def _dnsnamematch(dn, hostname, maxwildc # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. - pats.append(re.escape(leftmost)) + pats.append(stringutil.reescape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g.
www* - pats.append(re.escape(leftmost).replace(br'\*', '[^.]*')) + pats.append(stringutil.reescape(leftmost).replace(br'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: - pats.append(re.escape(frag)) + pats.append(stringutil.reescape(frag)) pat = re.compile(br'\A' + br'\.'.join(pats) + br'\Z', re.IGNORECASE) return pat.match(hostname) is not None @@ -640,9 +640,9 @@ def _verifycert(cert, hostname): return _('no certificate received') dnsnames = [] - san = cert.get('subjectAltName', []) + san = cert.get(r'subjectAltName', []) for key, value in san: - if key == 'DNS': + if key == r'DNS': try: if _dnsnamematch(value, hostname): return @@ -672,6 +672,7 @@ def _verifycert(cert, hostname): dnsnames.append(value) + dnsnames = [pycompat.bytesurl(d) for d in dnsnames] if len(dnsnames) > 1: return _('certificate is for %s') % ', '.join(dnsnames) elif len(dnsnames) == 1: diff --git a/mercurial/state.py b/mercurial/state.py new file mode 100644 --- /dev/null +++ b/mercurial/state.py @@ -0,0 +1,84 @@ +# state.py - writing and reading state files in Mercurial +# +# Copyright 2018 Pulkit Goyal <7895pulkit@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +""" +This file contains a class to wrap the state for commands and other +related logic. + +All the data related to the command state is stored as a dictionary in the +object. The class provides methods by which that data can be stored to disk in +a file under the .hg/ directory. + +We store the data on disk in CBOR, using the third-party cbor library to +serialize and deserialize data. +""" + +from __future__ import absolute_import + +from .thirdparty import cbor + +from . import ( + error, + util, +) + +class cmdstate(object): + """a wrapper class to store the state of commands like `rebase`, `graft`, + `histedit`, `shelve` etc. Extensions can also use this to write state files. + + All the data for the state is stored in the form of key-value pairs in a + dictionary. + + The class object can write all the data to a file in the .hg/ directory and + can repopulate the object data by reading that file. + + Uses cbor to serialize and deserialize data while writing and reading from + disk. + """ + + def __init__(self, repo, fname): + """ repo is the repo object + fname is the file name in which data should be stored in the .hg directory + """ + self._repo = repo + self.fname = fname + + def read(self): + """read the existing state file and return a dict of data stored""" + return self._read() + + def save(self, version, data): + """write all the state data to a file in .hg/ + + we use the third-party cbor library to serialize the data written to + the file.
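+
+        A minimal illustrative usage (the file name and payload here are
+        hypothetical, not part of the original patch):
+
+            state = cmdstate(repo, 'examplestate')
+            state.save(1, {'key': 'value'})
+            data = state.read()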
+ """ + if not isinstance(version, int): + raise error.ProgrammingError("version of state file should be" + " an integer") + + with self._repo.vfs(self.fname, 'wb', atomictemp=True) as fp: + fp.write('%d\n' % version) + cbor.dump(data, fp, canonical=True) + + def _read(self): + """reads the state file and returns a dictionary which contains the + data in the same format as it was before storing""" + with self._repo.vfs(self.fname, 'rb') as fp: + try: + int(fp.readline()) + except ValueError: + raise error.CorruptedState("unknown version of state file" + " found") + return cbor.load(fp) + + def delete(self): + """drop the state file if it exists""" + util.unlinkpath(self._repo.vfs.join(self.fname), ignoremissing=True) + + def exists(self): + """check whether the state file exists or not""" + return self._repo.vfs.exists(self.fname) diff --git a/mercurial/statprof.py b/mercurial/statprof.py --- a/mercurial/statprof.py +++ b/mercurial/statprof.py @@ -112,7 +112,6 @@ import json import os import signal import sys -import tempfile import threading import time @@ -140,7 +139,7 @@ skips = {"util.py:check", "extensions.py def clock(): times = os.times() - return times[0] + times[1] + return (times[0] + times[1], times[4]) ########################################################################### @@ -149,10 +148,11 @@ def clock(): class ProfileState(object): def __init__(self, frequency=None): self.reset(frequency) + self.track = 'cpu' def reset(self, frequency=None): # total so far - self.accumulated_time = 0.0 + self.accumulated_time = (0.0, 0.0) # start_time when timer is active self.last_start_time = None # a float @@ -171,10 +171,23 @@ class ProfileState(object): self.samples = [] def accumulate_time(self, stop_time): - self.accumulated_time += stop_time - self.last_start_time + increment = ( + stop_time[0] - self.last_start_time[0], + stop_time[1] - self.last_start_time[1], + ) + self.accumulated_time = ( + self.accumulated_time[0] + increment[0], + self.accumulated_time[1] + increment[1], + ) def seconds_per_sample(self): - return self.accumulated_time / len(self.samples) + return self.accumulated_time[self.timeidx] / len(self.samples) + + @property + def timeidx(self): + if self.track == 'real': + return 1 + return 0 state = ProfileState() @@ -262,7 +275,8 @@ def profile_signal_handler(signum, frame now = clock() state.accumulate_time(now) - state.samples.append(Sample.from_frame(frame, state.accumulated_time)) + timestamp = state.accumulated_time[state.timeidx] + state.samples.append(Sample.from_frame(frame, timestamp)) signal.setitimer(signal.ITIMER_PROF, state.sample_interval, 0.0) @@ -275,7 +289,9 @@ def samplerthread(tid): state.accumulate_time(now) frame = sys._current_frames()[tid] - state.samples.append(Sample.from_frame(frame, state.accumulated_time)) + + timestamp = state.accumulated_time[state.timeidx] + state.samples.append(Sample.from_frame(frame, timestamp)) state.last_start_time = now time.sleep(state.sample_interval) @@ -289,8 +305,9 @@ def is_active(): return state.profile_level > 0 lastmechanism = None -def start(mechanism='thread'): +def start(mechanism='thread', track='cpu'): '''Install the profiling signal handler, and start profiling.''' + state.track = track # note: nesting different modes won't work state.profile_level += 1 if state.profile_level == 1: state.last_start_time = clock() @@ -333,7 +350,7 @@ def stop(): def save_data(path): with open(path, 'w+') as file: - file.write(str(state.accumulated_time) + '\n') + file.write("%f %f\n" % state.accumulated_time) for sample
in state.samples: time = str(sample.time) stack = sample.stack @@ -344,7 +361,7 @@ def save_data(path): def load_data(path): lines = open(path, 'r').read().splitlines() - state.accumulated_time = float(lines[0]) + state.accumulated_time = [float(value) for value in lines[0].split()] state.samples = [] for line in lines[1:]: parts = line.split('\0') @@ -437,7 +454,8 @@ class DisplayFormats: def display(fp=None, format=3, data=None, **kwargs): '''Print statistics, either to stdout or the given file object.''' - data = data or state + if data is None: + data = state if fp is None: import sys @@ -466,7 +484,8 @@ def display(fp=None, format=3, data=None if format not in (DisplayFormats.Json, DisplayFormats.Chrome): print('---', file=fp) print('Sample count: %d' % len(data.samples), file=fp) - print('Total time: %f seconds' % data.accumulated_time, file=fp) + print('Total time: %f seconds (%f wall)' % data.accumulated_time, + file=fp) def display_by_line(data, fp): '''Print the profiler data with each sample line represented @@ -691,7 +710,7 @@ def write_to_flame(data, fp, scriptpath= file=fp) return - fd, path = tempfile.mkstemp() + fd, path = pycompat.mkstemp() file = open(path, "w+") diff --git a/mercurial/store.py b/mercurial/store.py --- a/mercurial/store.py +++ b/mercurial/store.py @@ -449,6 +449,7 @@ class fncache(object): def write(self, tr): if self._dirty: + assert self.entries is not None tr.addbackup('fncache') fp = self.vfs('fncache', mode='wb', atomictemp=True) if self.entries: @@ -489,10 +490,20 @@ class _fncachevfs(vfsmod.abstractvfs, vf self.encode = encode def __call__(self, path, mode='r', *args, **kw): + encoded = self.encode(path) if mode not in ('r', 'rb') and (path.startswith('data/') or path.startswith('meta/')): - self.fncache.add(path) - return self.vfs(self.encode(path), mode, *args, **kw) + # do not trigger a fncache load when adding a file that already is + # known to exist. + notload = self.fncache.entries is None and self.vfs.exists(encoded) + if notload and 'a' in mode and not self.vfs.stat(encoded).st_size: + # when appending to an existing file, if the file has size zero, + # it should be considered as missing. Such zero-size files are + # the result of truncation when a transaction is aborted. + notload = False + if not notload: + self.fncache.add(path) + return self.vfs(encoded, mode, *args, **kw) def join(self, path): if path: diff --git a/mercurial/streamclone.py b/mercurial/streamclone.py --- a/mercurial/streamclone.py +++ b/mercurial/streamclone.py @@ -10,7 +10,6 @@ from __future__ import absolute_import import contextlib import os import struct -import tempfile import warnings from .i18n import _ @@ -19,6 +18,7 @@ from . import ( cacheutil, error, phases, + pycompat, store, util, ) @@ -313,16 +313,15 @@ def generatebundlev1(repo, compression=' # This is where we'll add compression in the future. 
assert compression == 'UN' - seen = 0 - repo.ui.progress(_('bundle'), 0, total=bytecount, unit=_('bytes')) + progress = repo.ui.makeprogress(_('bundle'), total=bytecount, + unit=_('bytes')) + progress.update(0) for chunk in it: - seen += len(chunk) - repo.ui.progress(_('bundle'), seen, total=bytecount, - unit=_('bytes')) + progress.increment(step=len(chunk)) yield chunk - repo.ui.progress(_('bundle'), None) + progress.complete() return requirements, gen() @@ -338,8 +337,9 @@ def consumev1(repo, fp, filecount, bytec with repo.lock(): repo.ui.status(_('%d files to transfer, %s of data\n') % (filecount, util.bytecount(bytecount))) - handled_bytes = 0 - repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes')) + progress = repo.ui.makeprogress(_('clone'), total=bytecount, + unit=_('bytes')) + progress.update(0) start = util.timer() # TODO: get rid of (potential) inconsistency @@ -374,9 +374,7 @@ path = store.decodedir(name) with repo.svfs(path, 'w', backgroundclose=True) as ofp: for chunk in util.filechunkiter(fp, limit=size): - handled_bytes += len(chunk) - repo.ui.progress(_('clone'), handled_bytes, - total=bytecount, unit=_('bytes')) + progress.increment(step=len(chunk)) ofp.write(chunk) # force @filecache properties to be reloaded from @@ -386,7 +384,7 @@ elapsed = util.timer() - start if elapsed <= 0: elapsed = 0.001 - repo.ui.progress(_('clone'), None) + progress.complete() repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') % (util.bytecount(bytecount), elapsed, util.bytecount(bytecount / elapsed))) @@ -469,7 +467,7 @@ def maketempcopies(): files = [] try: def copy(src): - fd, dst = tempfile.mkstemp() + fd, dst = pycompat.mkstemp() os.close(fd) files.append(dst) util.copyfiles(src, dst, hardlink=True) @@ -494,41 +492,38 @@ def _makemap(repo): def _emit2(repo, entries, totalfilesize): """actually emit the stream bundle""" vfsmap = _makemap(repo) - progress = repo.ui.progress - progress(_('bundle'), 0, total=totalfilesize, unit=_('bytes')) - with maketempcopies() as copy: - try: - # copy is delayed until we are in the try - entries = [_filterfull(e, copy, vfsmap) for e in entries] - yield None # this release the lock on the repository - seen = 0 + progress = repo.ui.makeprogress(_('bundle'), total=totalfilesize, + unit=_('bytes')) + progress.update(0) + with maketempcopies() as copy, progress: + # copy is delayed until we are in the try + entries = [_filterfull(e, copy, vfsmap) for e in entries] + yield None # this releases the lock on the repository + seen = 0 - for src, name, ftype, data in entries: - vfs = vfsmap[src] - yield src - yield util.uvarintencode(len(name)) - if ftype == _fileappend: - fp = vfs(name) - size = data - elif ftype == _filefull: - fp = open(data, 'rb') - size = util.fstat(fp).st_size - try: - yield util.uvarintencode(size) - yield name - if size <= 65536: - chunks = (fp.read(size),) - else: - chunks = util.filechunkiter(fp, limit=size) - for chunk in chunks: - seen += len(chunk) - progress(_('bundle'), seen, total=totalfilesize, - unit=_('bytes')) - yield chunk - finally: - fp.close() - finally: - progress(_('bundle'), None) + for src, name, ftype, data in entries: + vfs = vfsmap[src] + yield src + yield util.uvarintencode(len(name)) + if ftype == _fileappend: + fp = vfs(name) + size = data + elif ftype == _filefull: + fp = open(data, 'rb') + size = util.fstat(fp).st_size + try: + yield util.uvarintencode(size) + yield name + if size <= 65536: + chunks =
(fp.read(size),) + else: + chunks = util.filechunkiter(fp, limit=size) + for chunk in chunks: + seen += len(chunk) + progress.update(seen) + yield chunk + finally: + fp.close() def generatev2(repo): """Emit content for version 2 of a streaming clone. @@ -589,10 +584,9 @@ def consumev2(repo, fp, filecount, files (filecount, util.bytecount(filesize))) start = util.timer() - handledbytes = 0 - progress = repo.ui.progress - - progress(_('clone'), handledbytes, total=filesize, unit=_('bytes')) + progress = repo.ui.makeprogress(_('clone'), total=filesize, + unit=_('bytes')) + progress.update(0) vfsmap = _makemap(repo) @@ -614,9 +608,7 @@ def consumev2(repo, fp, filecount, files with vfs(name, 'w') as ofp: for chunk in util.filechunkiter(fp, limit=datalen): - handledbytes += len(chunk) - progress(_('clone'), handledbytes, total=filesize, - unit=_('bytes')) + progress.increment(step=len(chunk)) ofp.write(chunk) # force @filecache properties to be reloaded from @@ -626,10 +618,10 @@ def consumev2(repo, fp, filecount, files elapsed = util.timer() - start if elapsed <= 0: elapsed = 0.001 - progress(_('clone'), None) repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') % - (util.bytecount(handledbytes), elapsed, - util.bytecount(handledbytes / elapsed))) + (util.bytecount(progress.pos), elapsed, + util.bytecount(progress.pos / elapsed))) + progress.complete() def applybundlev2(repo, fp, filecount, filesize, requirements): missingreqs = [r for r in requirements if r not in repo.supported] diff --git a/mercurial/subrepo.py b/mercurial/subrepo.py --- a/mercurial/subrepo.py +++ b/mercurial/subrepo.py @@ -318,9 +318,9 @@ class abstractsubrepo(object): """return file flags""" return '' - def getfileset(self, expr): + def matchfileset(self, expr, badfn=None): """Resolve the fileset expression for this repo""" - return set() + return matchmod.nevermatcher(self.wvfs.base, '', badfn=badfn) def printfiles(self, ui, m, fm, fmt, subrepos): """handle the files command for this subrepo""" @@ -333,17 +333,17 @@ class abstractsubrepo(object): files = self.files() total = len(files) relpath = subrelpath(self) - self.ui.progress(_('archiving (%s)') % relpath, 0, - unit=_('files'), total=total) - for i, name in enumerate(files): + progress = self.ui.makeprogress(_('archiving (%s)') % relpath, + unit=_('files'), total=total) + progress.update(0) + for name in files: flags = self.fileflags(name) mode = 'x' in flags and 0o755 or 0o644 symlink = 'l' in flags archiver.addfile(prefix + self._path + '/' + name, mode, symlink, self.filedata(name, decode)) - self.ui.progress(_('archiving (%s)') % relpath, i + 1, - unit=_('files'), total=total) - self.ui.progress(_('archiving (%s)') % relpath, None) + progress.increment() + progress.complete() return total def walk(self, match): @@ -792,24 +792,30 @@ class hgsubrepo(abstractsubrepo): return cmdutil.files(ui, ctx, m, fm, fmt, subrepos) @annotatesubrepoerror - def getfileset(self, expr): + def matchfileset(self, expr, badfn=None): + repo = self._repo if self._ctx.rev() is None: - ctx = self._repo[None] + ctx = repo[None] else: rev = self._state[1] - ctx = self._repo[rev] + ctx = repo[rev] - files = ctx.getfileset(expr) + matchers = [ctx.matchfileset(expr, badfn=badfn)] for subpath in ctx.substate: sub = ctx.sub(subpath) try: - files.extend(subpath + '/' + f for f in sub.getfileset(expr)) + sm = sub.matchfileset(expr, badfn=badfn) + pm = matchmod.prefixdirmatcher(repo.root, repo.getcwd(), + subpath, sm, badfn=badfn) + matchers.append(pm) except error.LookupError: 
self.ui.status(_("skipping missing subrepository: %s\n") % self.wvfs.reljoin(reporelpath(self), subpath)) - return files + if len(matchers) == 1: + return matchers[0] + return matchmod.unionmatcher(matchers) def walk(self, match): ctx = self._repo[None] @@ -1640,8 +1646,10 @@ class gitsubrepo(abstractsubrepo): tarstream = self._gitcommand(['archive', revision], stream=True) tar = tarfile.open(fileobj=tarstream, mode=r'r|') relpath = subrelpath(self) - self.ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files')) - for i, info in enumerate(tar): + progress = self.ui.makeprogress(_('archiving (%s)') % relpath, + unit=_('files')) + progress.update(0) + for info in tar: if info.isdir(): continue if match and not match(info.name): @@ -1653,9 +1661,8 @@ class gitsubrepo(abstractsubrepo): archiver.addfile(prefix + self._path + '/' + info.name, info.mode, info.issym(), data) total += 1 - self.ui.progress(_('archiving (%s)') % relpath, i + 1, - unit=_('files')) - self.ui.progress(_('archiving (%s)') % relpath, None) + progress.increment() + progress.complete() return total @@ -1695,7 +1702,7 @@ class gitsubrepo(abstractsubrepo): tab = line.find('\t') if tab == -1: continue - status, f = line[tab - 1], line[tab + 1:] + status, f = line[tab - 1:tab], line[tab + 1:] if status == 'M': modified.append(f) elif status == 'A': diff --git a/mercurial/templatefilters.py b/mercurial/templatefilters.py --- a/mercurial/templatefilters.py +++ b/mercurial/templatefilters.py @@ -99,6 +99,45 @@ def basename(path): """ return os.path.basename(path) +@templatefilter('commondir') +def commondir(filelist): + """List of text. Treats each list item as file name with / + as path separator and returns the longest common directory + prefix shared by all list items. + Returns the empty string if no common prefix exists. + + The list items are not normalized, i.e. "foo/../bar" is handled as + file "bar" in the directory "foo/..". Leading slashes are ignored. + + For example, ["foo/bar/baz", "foo/baz/bar"] becomes "foo" and + ["foo/bar", "baz"] becomes "". + """ + def common(a, b): + if len(a) > len(b): + a = b[:len(a)] + elif len(b) > len(a): + b = b[:len(a)] + if a == b: + return a + for i in xrange(len(a)): + if a[i] != b[i]: + return a[:i] + return a + try: + if not filelist: + return "" + dirlist = [f.lstrip('/').split('/')[:-1] for f in filelist] + if len(dirlist) == 1: + return '/'.join(dirlist[0]) + a = min(dirlist) + b = max(dirlist) + # The common prefix of a and b is shared with all + # elements of the list since Python sorts lexicographical + # and [1, x] after [1]. + return '/'.join(common(a, b)) + except TypeError: + raise error.ParseError(_('argument is not a list of text')) + @templatefilter('count') def count(i): """List or text. Returns the length as an integer.""" @@ -238,6 +277,7 @@ def indent(text, prefix): @templatefilter('json') def json(obj, paranoid=True): + """Any object. Serializes the object to a JSON formatted text.""" if obj is None: return 'null' elif obj is False: @@ -248,13 +288,9 @@ def json(obj, paranoid=True): return pycompat.bytestr(obj) elif isinstance(obj, bytes): return '"%s"' % encoding.jsonescape(obj, paranoid=paranoid) - elif isinstance(obj, str): - # This branch is unreachable on Python 2, because bytes == str - # and we'll return in the next-earlier block in the elif - # ladder. On Python 3, this helps us catch bugs before they - # hurt someone. 
+ elif isinstance(obj, type(u'')): raise error.ProgrammingError( - 'Mercurial only does output with bytes on Python 3: %r' % obj) + 'Mercurial only does output with bytes: %r' % obj) elif util.safehasattr(obj, 'keys'): out = ['"%s": %s' % (encoding.jsonescape(k, paranoid=paranoid), json(v, paranoid)) diff --git a/mercurial/templatefuncs.py b/mercurial/templatefuncs.py --- a/mercurial/templatefuncs.py +++ b/mercurial/templatefuncs.py @@ -12,6 +12,7 @@ import re from .i18n import _ from .node import ( bin, + wdirid, ) from . import ( color, @@ -19,7 +20,6 @@ from . import ( error, minirst, obsutil, - pycompat, registrar, revset as revsetmod, revsetlang, @@ -35,6 +35,7 @@ from .utils import ( ) evalrawexp = templateutil.evalrawexp +evalwrapped = templateutil.evalwrapped evalfuncarg = templateutil.evalfuncarg evalboolean = templateutil.evalboolean evaldate = templateutil.evaldate @@ -84,7 +85,7 @@ def dict_(context, mapping, args): for k, v in args['kwargs'].iteritems()) return templateutil.hybriddict(data) -@templatefunc('diff([includepattern [, excludepattern]])') +@templatefunc('diff([includepattern [, excludepattern]])', requires={'ctx'}) def diff(context, mapping, args): """Show a diff, optionally specifying files to include or exclude.""" @@ -104,7 +105,7 @@ def diff(context, mapping, args): return ''.join(chunks) -@templatefunc('extdata(source)', argspec='source') +@templatefunc('extdata(source)', argspec='source', requires={'ctx', 'cache'}) def extdata(context, mapping, args): """Show a text read from the specified extdata source. (EXPERIMENTAL)""" if 'source' not in args: @@ -112,6 +113,13 @@ raise error.ParseError(_('extdata expects one argument')) source = evalstring(context, mapping, args['source']) + if not source: + sym = templateutil.findsymbolicname(args['source']) + if sym: + raise error.ParseError(_('empty data source specified'), + hint=_("did you mean extdata('%s')?") % sym) + else: + raise error.ParseError(_('empty data source specified')) cache = context.resource(mapping, 'cache').setdefault('extdata', {}) ctx = context.resource(mapping, 'ctx') if source in cache: @@ -120,7 +128,7 @@ data = cache[source] = scmutil.extdatasource(ctx.repo(), source) return data.get(ctx.rev(), '') -@templatefunc('files(pattern)') +@templatefunc('files(pattern)', requires={'ctx'}) def files(context, mapping, args): """All files of the current changeset matching the pattern. See :hg:`help patterns`.""" @@ -158,7 +166,26 @@ def fill(context, mapping, args): return templatefilters.fill(text, width, initindent, hangindent) -@templatefunc('formatnode(node)') +@templatefunc('filter(iterable[, expr])') +def filter_(context, mapping, args): + """Remove empty elements from a list or a dict. If expr is specified, + it's applied to each element to test emptiness.""" + if not (1 <= len(args) <= 2): + # i18n: "filter" is a keyword + raise error.ParseError(_("filter expects one or two arguments")) + iterable = evalwrapped(context, mapping, args[0]) + if len(args) == 1: + def select(w): + return w.tobool(context, mapping) + else: + def select(w): + if not isinstance(w, templateutil.mappable): + raise error.ParseError(_("not filterable by expression")) + lm = context.overlaymap(mapping, w.tomap(context)) + return evalboolean(context, lm, args[1]) + return iterable.filter(context, mapping, select) + +@templatefunc('formatnode(node)', requires={'ui'}) def formatnode(context, mapping, args): """Obtain the preferred form of a changeset hash.
(DEPRECATED)""" if len(args) != 1: @@ -171,7 +198,7 @@ def formatnode(context, mapping, args): return node return templatefilters.short(node) -@templatefunc('mailmap(author)') +@templatefunc('mailmap(author)', requires={'repo', 'cache'}) def mailmap(context, mapping, args): """Return the author, updated according to the value set in the .mailmap file""" @@ -252,13 +279,14 @@ def get(context, mapping, args): # i18n: "get" is a keyword raise error.ParseError(_("get() expects two arguments")) - dictarg = evalfuncarg(context, mapping, args[0]) - if not util.safehasattr(dictarg, 'get'): + dictarg = evalwrapped(context, mapping, args[0]) + key = evalrawexp(context, mapping, args[1]) + try: + return dictarg.getmember(context, mapping, key) + except error.ParseError as err: # i18n: "get" is a keyword - raise error.ParseError(_("get() expects a dict as first argument")) - - key = evalfuncarg(context, mapping, args[1]) - return templateutil.getdictitem(dictarg, key) + hint = _("get() expects a dict as first argument") + raise error.ParseError(bytes(err), hint=hint) @templatefunc('if(expr, then[, else])') def if_(context, mapping, args): @@ -282,13 +310,10 @@ def ifcontains(context, mapping, args): # i18n: "ifcontains" is a keyword raise error.ParseError(_("ifcontains expects three or four arguments")) - haystack = evalfuncarg(context, mapping, args[1]) - keytype = getattr(haystack, 'keytype', None) + haystack = evalwrapped(context, mapping, args[1]) try: needle = evalrawexp(context, mapping, args[0]) - needle = templateutil.unwrapastype(context, mapping, needle, - keytype or bytes) - found = (needle in haystack) + found = haystack.contains(context, mapping, needle) except error.ParseError: found = False @@ -319,18 +344,13 @@ def join(context, mapping, args): # i18n: "join" is a keyword raise error.ParseError(_("join expects one or two arguments")) - joinset = evalrawexp(context, mapping, args[0]) + joinset = evalwrapped(context, mapping, args[0]) joiner = " " if len(args) > 1: joiner = evalstring(context, mapping, args[1]) - if isinstance(joinset, templateutil.wrapped): - return joinset.join(context, mapping, joiner) - # TODO: perhaps a generator should be stringify()-ed here, but we can't - # because hgweb abuses it as a keyword that returns a list of dicts. - joinset = templateutil.unwrapvalue(context, mapping, joinset) - return templateutil.joinitems(pycompat.maybebytestr(joinset), joiner) + return joinset.join(context, mapping, joiner) -@templatefunc('label(label, expr)') +@templatefunc('label(label, expr)', requires={'ui'}) def label(context, mapping, args): """Apply a label to generated content. Content with a label applied can result in additional post-processing, such as @@ -352,7 +372,9 @@ def latesttag(context, mapping, args): """The global tags matching the given pattern on the most recent globally tagged ancestor of this changeset. If no such tags exist, the "{tag}" template resolves to - the string "null".""" + the string "null". See :hg:`help revisions.patterns` for the pattern + syntax. 
+ """ if len(args) > 1: # i18n: "latesttag" is a keyword raise error.ParseError(_("latesttag expects at most one argument")) @@ -388,7 +410,7 @@ def localdate(context, mapping, args): raise error.ParseError(_("localdate expects a timezone")) else: tzoffset = dateutil.makedate()[1] - return (date[0], tzoffset) + return templateutil.date((date[0], tzoffset)) @templatefunc('max(iterable)') def max_(context, mapping, args, **kwargs): @@ -397,13 +419,13 @@ def max_(context, mapping, args, **kwarg # i18n: "max" is a keyword raise error.ParseError(_("max expects one argument")) - iterable = evalfuncarg(context, mapping, args[0]) + iterable = evalwrapped(context, mapping, args[0]) try: - x = max(pycompat.maybebytestr(iterable)) - except (TypeError, ValueError): + return iterable.getmax(context, mapping) + except error.ParseError as err: # i18n: "max" is a keyword - raise error.ParseError(_("max first argument should be an iterable")) - return templateutil.wraphybridvalue(iterable, x, x) + hint = _("max first argument should be an iterable") + raise error.ParseError(bytes(err), hint=hint) @templatefunc('min(iterable)') def min_(context, mapping, args, **kwargs): @@ -412,13 +434,13 @@ def min_(context, mapping, args, **kwarg # i18n: "min" is a keyword raise error.ParseError(_("min expects one argument")) - iterable = evalfuncarg(context, mapping, args[0]) + iterable = evalwrapped(context, mapping, args[0]) try: - x = min(pycompat.maybebytestr(iterable)) - except (TypeError, ValueError): + return iterable.getmin(context, mapping) + except error.ParseError as err: # i18n: "min" is a keyword - raise error.ParseError(_("min first argument should be an iterable")) - return templateutil.wraphybridvalue(iterable, x, x) + hint = _("min first argument should be an iterable") + raise error.ParseError(bytes(err), hint=hint) @templatefunc('mod(a, b)') def mod(context, mapping, args): @@ -458,6 +480,7 @@ def obsfatedate(context, mapping, args): markers = evalfuncarg(context, mapping, args[0]) try: + # TODO: maybe this has to be a wrapped list of date wrappers? data = obsutil.markersdates(markers) return templateutil.hybridlist(data, name='date', fmt='%d %d') except (TypeError, KeyError): @@ -500,7 +523,7 @@ def obsfateverb(context, mapping, args): errmsg = _("obsfateverb first argument should be countable") raise error.ParseError(errmsg) -@templatefunc('relpath(path)') +@templatefunc('relpath(path)', requires={'repo'}) def relpath(context, mapping, args): """Convert a repository-absolute path into a filesystem path relative to the current working directory.""" @@ -508,11 +531,11 @@ def relpath(context, mapping, args): # i18n: "relpath" is a keyword raise error.ParseError(_("relpath expects one argument")) - repo = context.resource(mapping, 'ctx').repo() + repo = context.resource(mapping, 'repo') path = evalstring(context, mapping, args[0]) return repo.pathto(path) -@templatefunc('revset(query[, formatargs...])') +@templatefunc('revset(query[, formatargs...])', requires={'repo', 'cache'}) def revset(context, mapping, args): """Execute a revision set query. 
See :hg:`help revset`.""" @@ -521,8 +544,7 @@ def revset(context, mapping, args): raise error.ParseError(_("revset expects one or more arguments")) raw = evalstring(context, mapping, args[0]) - ctx = context.resource(mapping, 'ctx') - repo = ctx.repo() + repo = context.resource(mapping, 'repo') def query(expr): m = revsetmod.match(repo.ui, expr, lookup=revsetmod.lookupfn(repo)) @@ -574,7 +596,7 @@ def separate(context, mapping, args): yield sep yield argstr -@templatefunc('shortest(node, minlength=4)') +@templatefunc('shortest(node, minlength=4)', requires={'repo'}) def shortest(context, mapping, args): """Obtain the shortest representation of a node.""" @@ -590,7 +612,7 @@ def shortest(context, mapping, args): # i18n: "shortest" is a keyword _("shortest() expects an integer minlength")) - repo = context.resource(mapping, 'ctx')._repo + repo = context.resource(mapping, 'repo') if len(hexnode) > 40: return hexnode elif len(hexnode) == 40: @@ -601,11 +623,16 @@ def shortest(context, mapping, args): else: try: node = scmutil.resolvehexnodeidprefix(repo, hexnode) - except (error.LookupError, error.WdirUnsupported): + except error.WdirUnsupported: + node = wdirid + except error.LookupError: return hexnode if not node: return hexnode - return scmutil.shortesthexnodeidprefix(repo, node, minlength) + try: + return scmutil.shortesthexnodeidprefix(repo, node, minlength) + except error.RepoLookupError: + return hexnode @templatefunc('strip(text[, chars])') def strip(context, mapping, args): diff --git a/mercurial/templatekw.py b/mercurial/templatekw.py --- a/mercurial/templatekw.py +++ b/mercurial/templatekw.py @@ -14,6 +14,7 @@ from .node import ( ) from . import ( + diffutil, encoding, error, hbisect, @@ -31,41 +32,12 @@ from .utils import ( ) _hybrid = templateutil.hybrid -_mappable = templateutil.mappable hybriddict = templateutil.hybriddict hybridlist = templateutil.hybridlist compatdict = templateutil.compatdict compatlist = templateutil.compatlist _showcompatlist = templateutil._showcompatlist -def _showlist(name, values, templ, mapping, plural=None, separator=' '): - ui = mapping.get('ui') - if ui: - ui.deprecwarn("templatekw._showlist() is deprecated, use " - "templateutil._showcompatlist()", '4.6') - context = templ # this is actually a template context, not a templater - return _showcompatlist(context, mapping, name, values, plural, separator) - -def showdict(name, data, mapping, plural=None, key='key', value='value', - fmt=None, separator=' '): - ui = mapping.get('ui') - if ui: - ui.deprecwarn("templatekw.showdict() is deprecated, use " - "templateutil.compatdict()", '4.6') - c = [{key: k, value: v} for k, v in data.iteritems()] - f = _showlist(name, c, mapping['templ'], mapping, plural, separator) - return hybriddict(data, key=key, value=value, fmt=fmt, gen=f) - -def showlist(name, values, mapping, plural=None, element=None, separator=' '): - ui = mapping.get('ui') - if ui: - ui.deprecwarn("templatekw.showlist() is deprecated, use " - "templateutil.compatlist()", '4.6') - if not element: - element = name - f = _showlist(name, values, mapping['templ'], mapping, plural, separator) - return hybridlist(values, name=element, gen=f) - def getlatesttags(context, mapping, pattern=None): '''return date, distance and name for the latest tag of rev''' repo = context.resource(mapping, 'repo') @@ -139,7 +111,7 @@ def getrenamedfn(repo, endrev=None): for i in fl: lr = fl.linkrev(i) renamed = fl.renamed(fl.node(i)) - rcache[fn][lr] = renamed + rcache[fn][lr] = renamed and renamed[0] if lr >= endrev: 
break if rev in rcache[fn]: @@ -148,7 +120,8 @@ def getrenamedfn(repo, endrev=None): # If linkrev != rev (i.e. rev not found in rcache) fallback to # filectx logic. try: - return repo[rev][fn].renamed() + renamed = repo[rev][fn].renamed() + return renamed and renamed[0] except error.LookupError: return None @@ -268,7 +241,9 @@ def showactivebookmark(context, mapping) def showdate(context, mapping): """Date information. The date when the changeset was committed.""" ctx = context.resource(mapping, 'ctx') - return ctx.date() + # the default string format is '' because + # python-hglib splits date at decimal separator. + return templateutil.date(ctx.date(), showfmt='%d.0%d') @templatekeyword('desc', requires={'ctx'}) def showdescription(context, mapping): @@ -278,16 +253,21 @@ def showdescription(context, mapping): if isinstance(s, encoding.localstr): # try hard to preserve utf-8 bytes return encoding.tolocal(encoding.fromlocal(s).strip()) + elif isinstance(s, encoding.safelocalstr): + return encoding.safelocalstr(s.strip()) else: return s.strip() -@templatekeyword('diffstat', requires={'ctx'}) +@templatekeyword('diffstat', requires={'ui', 'ctx'}) def showdiffstat(context, mapping): """String. Statistics of changes with the following format: "modified files: +added/-removed lines" """ + ui = context.resource(mapping, 'ui') ctx = context.resource(mapping, 'ctx') - stats = patch.diffstatdata(util.iterlines(ctx.diff(noprefix=False))) + diffopts = diffutil.diffallopts(ui, {'noprefix': False}) + diff = ctx.diff(opts=diffopts) + stats = patch.diffstatdata(util.iterlines(diff)) maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats) return '%d: +%d/-%d' % (len(stats), adds, removes) @@ -344,7 +324,7 @@ def showfilecopies(context, mapping): for fn in ctx.files(): rename = getrenamed(fn, ctx.rev()) if rename: - copies.append((fn, rename[0])) + copies.append((fn, rename)) copies = util.sortdict(copies) return compatdict(context, mapping, 'file_copy', copies, @@ -392,12 +372,19 @@ def showgraphnode(context, mapping): return getgraphnode(repo, ctx) def getgraphnode(repo, ctx): + return getgraphnodecurrent(repo, ctx) or getgraphnodesymbol(ctx) + +def getgraphnodecurrent(repo, ctx): wpnodes = repo.dirstate.parents() if wpnodes[1] == nullid: wpnodes = wpnodes[:1] if ctx.node() in wpnodes: return '@' - elif ctx.obsolete(): + else: + return '' + +def getgraphnodesymbol(ctx): + if ctx.obsolete(): return 'x' elif ctx.isunstable(): return '*' @@ -481,13 +468,14 @@ def showmanifest(context, mapping): if mnode is None: # just avoid crash, we might want to use the 'ff...' hash in future return - mrev = repo.manifestlog._revlog.rev(mnode) + mrev = repo.manifestlog.rev(mnode) mhex = hex(mnode) mapping = context.overlaymap(mapping, {'rev': mrev, 'node': mhex}) f = context.process('manifest', mapping) # TODO: perhaps 'ctx' should be dropped from mapping because manifest # rev and node are completely different from changeset's. 
- return _mappable(f, None, f, lambda x: {'rev': mrev, 'node': mhex}) + return templateutil.hybriditem(f, None, f, + lambda x: {'rev': mrev, 'node': mhex}) @templatekeyword('obsfate', requires={'ui', 'repo', 'ctx'}) def showobsfate(context, mapping): @@ -583,7 +571,7 @@ def showpredecessors(context, mapping): repo = context.resource(mapping, 'repo') ctx = context.resource(mapping, 'ctx') predecessors = sorted(obsutil.closestpredecessors(repo, ctx.node())) - predecessors = map(hex, predecessors) + predecessors = pycompat.maplist(hex, predecessors) return _hybrid(None, predecessors, lambda x: {'ctx': repo[x]}, diff --git a/mercurial/templater.py b/mercurial/templater.py --- a/mercurial/templater.py +++ b/mercurial/templater.py @@ -26,23 +26,23 @@ generator values of any printable types, and will be folded by ``stringify()`` or ``flatten()``. - BUG: hgweb overloads this type for mappings (i.e. some hgweb keywords - returns a generator of dicts.) - None sometimes represents an empty value, which can be stringified to ''. True, False, int, float can be stringified as such. -date tuple - a (unixtime, offset) tuple, which produces no meaningful output by itself. +wrappedbytes, wrappedvalue + a wrapper for the above printable types. + +date + represents a (unixtime, offset) tuple. hybrid represents a list/dict of printable values, which can also be converted to mappings by % operator. -mappable +hybriditem represents a scalar printable value, also supports % operator. mappinggenerator, mappinglist @@ -253,7 +253,8 @@ def _scantemplate(tmpl, start, stop, quo p = parser.parser(elements) try: while pos < stop: - n = min((tmpl.find(c, pos, stop) for c in sepchars), + n = min((tmpl.find(c, pos, stop) + for c in pycompat.bytestr(sepchars)), key=lambda n: (n < 0, n)) if n < 0: yield ('string', unescape(tmpl[pos:stop]), pos) @@ -596,8 +597,7 @@ class engine(object): filter uses function to transform value. 
syntax is {key|filter1|filter2|...}.''' - def __init__(self, loader, filters=None, defaults=None, resources=None, - aliases=()): + def __init__(self, loader, filters=None, defaults=None, resources=None): self._loader = loader if filters is None: filters = {} @@ -609,7 +609,6 @@ class engine(object): resources = nullresourcemapper() self._defaults = defaults self._resources = resources - self._aliasmap = _aliasrules.buildmap(aliases) self._cache = {} # key: (func, data) self._tmplcache = {} # literal template: (func, data) @@ -664,12 +663,10 @@ class engine(object): def _load(self, t): '''load, parse, and cache a template''' if t not in self._cache: + x = self._loader(t) # put poison to cut recursion while compiling 't' self._cache[t] = (_runrecursivesymbol, t) try: - x = parse(self._loader(t)) - if self._aliasmap: - x = _aliasrules.expand(self._aliasmap, x) self._cache[t] = compileexp(x, self, methods) except: # re-raises del self._cache[t] @@ -717,8 +714,6 @@ class engine(object): mapping = extramapping return templateutil.flatten(self, mapping, func(self, mapping, data)) -engines = {'default': engine} - def stylelist(): paths = templatepaths() if not paths: @@ -776,13 +771,81 @@ def _readmapfile(mapfile): conf.source('templates', key)) cache[key] = unquotestring(val) elif key != '__base__': - val = 'default', val - if ':' in val[1]: - val = val[1].split(':', 1) - tmap[key] = val[0], os.path.join(base, val[1]) + tmap[key] = os.path.join(base, val) aliases.extend(conf['templatealias'].items()) return cache, tmap, aliases +class loader(object): + """Load template fragments optionally from a map file""" + + def __init__(self, cache, aliases): + if cache is None: + cache = {} + self.cache = cache.copy() + self._map = {} + self._aliasmap = _aliasrules.buildmap(aliases) + + def __contains__(self, key): + return key in self.cache or key in self._map + + def load(self, t): + """Get parsed tree for the given template name. Use a local cache.""" + if t not in self.cache: + try: + self.cache[t] = util.readfile(self._map[t]) + except KeyError as inst: + raise templateutil.TemplateNotFound( + _('"%s" not in template map') % inst.args[0]) + except IOError as inst: + reason = (_('template file %s: %s') + % (self._map[t], + stringutil.forcebytestr(inst.args[1]))) + raise IOError(inst.args[0], encoding.strfromlocal(reason)) + return self._parse(self.cache[t]) + + def _parse(self, tmpl): + x = parse(tmpl) + if self._aliasmap: + x = _aliasrules.expand(self._aliasmap, x) + return x + + def _findsymbolsused(self, tree, syms): + if not tree: + return + op = tree[0] + if op == 'symbol': + s = tree[1] + if s in syms[0]: + return # avoid recursion: s -> cache[s] -> s + syms[0].add(s) + if s in self.cache or s in self._map: + # s may be a reference for named template + self._findsymbolsused(self.load(s), syms) + return + if op in {'integer', 'string'}: + return + # '{arg|func}' == '{func(arg)}' + if op == '|': + syms[1].add(getsymbol(tree[2])) + self._findsymbolsused(tree[1], syms) + return + if op == 'func': + syms[1].add(getsymbol(tree[1])) + self._findsymbolsused(tree[2], syms) + return + for x in tree[1:]: + self._findsymbolsused(x, syms) + + def symbolsused(self, t): + """Look up (keywords, filters/functions) referenced from the name + template 't' + + This may load additional templates from the map file. 
+ """ + syms = (set(), set()) + self._findsymbolsused(self.load(t), syms) + return syms + class templater(object): def __init__(self, filters=None, defaults=None, resources=None, @@ -800,21 +863,12 @@ class templater(object): self.cache may be updated later to register additional template fragments. """ - if filters is None: - filters = {} - if defaults is None: - defaults = {} - if cache is None: - cache = {} - self.cache = cache.copy() - self.map = {} - self.filters = templatefilters.filters.copy() - self.filters.update(filters) - self.defaults = defaults - self._resources = resources - self._aliases = aliases - self.minchunk, self.maxchunk = minchunk, maxchunk - self.ecache = {} + allfilters = templatefilters.filters.copy() + if filters: + allfilters.update(filters) + self._loader = loader(cache, aliases) + self._proc = engine(self._loader.load, allfilters, defaults, resources) + self._minchunk, self._maxchunk = minchunk, maxchunk @classmethod def frommapfile(cls, mapfile, filters=None, defaults=None, resources=None, @@ -822,28 +876,46 @@ class templater(object): """Create templater from the specified map file""" t = cls(filters, defaults, resources, cache, [], minchunk, maxchunk) cache, tmap, aliases = _readmapfile(mapfile) - t.cache.update(cache) - t.map = tmap - t._aliases = aliases + t._loader.cache.update(cache) + t._loader._map = tmap + t._loader._aliasmap = _aliasrules.buildmap(aliases) return t def __contains__(self, key): - return key in self.cache or key in self.map + return key in self._loader + + @property + def cache(self): + return self._loader.cache + + # for highlight extension to insert one-time 'colorize' filter + @property + def _filters(self): + return self._proc._filters + + @property + def defaults(self): + return self._proc._defaults def load(self, t): - '''Get the template for the given template name. Use a local cache.''' - if t not in self.cache: - try: - self.cache[t] = util.readfile(self.map[t][1]) - except KeyError as inst: - raise templateutil.TemplateNotFound( - _('"%s" not in template map') % inst.args[0]) - except IOError as inst: - reason = (_('template file %s: %s') - % (self.map[t][1], - stringutil.forcebytestr(inst.args[1]))) - raise IOError(inst.args[0], encoding.strfromlocal(reason)) - return self.cache[t] + """Get parsed tree for the given template name. Use a local cache.""" + return self._loader.load(t) + + def symbolsuseddefault(self): + """Look up (keywords, filters/functions) referenced from the default + unnamed template + + This may load additional templates from the map file. + """ + return self.symbolsused('') + + def symbolsused(self, t): + """Look up (keywords, filters/functions) referenced from the name + template 't' + + This may load additional templates from the map file. 
+ """ + return self._loader.symbolsused(t) def renderdefault(self, mapping): """Render the default unnamed template and return result as string""" @@ -856,20 +928,10 @@ class templater(object): def generate(self, t, mapping): """Return a generator that renders the specified named template and yields chunks""" - ttype = t in self.map and self.map[t][0] or 'default' - if ttype not in self.ecache: - try: - ecls = engines[ttype] - except KeyError: - raise error.Abort(_('invalid template engine: %s') % ttype) - self.ecache[ttype] = ecls(self.load, self.filters, self.defaults, - self._resources, self._aliases) - proc = self.ecache[ttype] - - stream = proc.process(t, mapping) - if self.minchunk: - stream = util.increasingchunks(stream, min=self.minchunk, - max=self.maxchunk) + stream = self._proc.process(t, mapping) + if self._minchunk: + stream = util.increasingchunks(stream, min=self._minchunk, + max=self._maxchunk) return stream def templatepaths(): diff --git a/mercurial/templates/gitweb/graph.tmpl b/mercurial/templates/gitweb/graph.tmpl --- a/mercurial/templates/gitweb/graph.tmpl +++ b/mercurial/templates/gitweb/graph.tmpl @@ -21,7 +21,7 @@ graph | tags | bookmarks | branches | -files | +files{archives%archiveentry} | help
less diff --git a/mercurial/templates/gitweb/manifest.tmpl b/mercurial/templates/gitweb/manifest.tmpl --- a/mercurial/templates/gitweb/manifest.tmpl +++ b/mercurial/templates/gitweb/manifest.tmpl @@ -30,13 +30,7 @@ files |
{path|escape} {alltags}
- - - - - - - +{ifeq(path, up, '', updirentry)} {dentries%direntry} {fentries%fileentry}
drwxr-xr-x[up]
diff --git a/mercurial/templates/gitweb/map b/mercurial/templates/gitweb/map --- a/mercurial/templates/gitweb/map +++ b/mercurial/templates/gitweb/map @@ -59,6 +59,16 @@ fileellipses = '...' changelogentry = changelogentry.tmpl changeset = changeset.tmpl manifest = manifest.tmpl +updirentry = ' + + drwxr-xr-x + + + + [up] + +   + ' direntry = ' drwxr-xr-x diff --git a/mercurial/templates/map-cmdline.show b/mercurial/templates/map-cmdline.show --- a/mercurial/templates/map-cmdline.show +++ b/mercurial/templates/map-cmdline.show @@ -15,7 +15,11 @@ cset_shortnode = '{labelcset(shortest(no # Treat branch and tags specially so we don't display "default" or "tip" cset_namespace = '{ifeq(namespace, "branches", names_branches, ifeq(namespace, "tags", names_tags, names_others))}' names_branches = '{ifeq(branch, "default", "", " ({label('log.{colorname}', branch)})")}' -names_tags = '{if(names % "{ifeq(name, 'tip', '', name)}", " ({label('log.{colorname}', join(names % "{ifeq(name, 'tip', '', name)}", ' '))})")}' +names_tags = '{if(filter_tags(names), + " ({label('log.{colorname}', join(filter_tags(names), ' '))})")}' names_others = '{if(names, " ({label('log.{colorname}', join(names, ' '))})")}' cset_shortdesc = '{label("log.description", desc|firstline)}' + +[templatealias] +filter_tags(names) = filter(names, ifeq(name, 'tip', '', name)) diff --git a/mercurial/templates/monoblue/graph.tmpl b/mercurial/templates/monoblue/graph.tmpl --- a/mercurial/templates/monoblue/graph.tmpl +++ b/mercurial/templates/monoblue/graph.tmpl @@ -20,6 +20,7 @@
  • bookmarks
  • branches
  • files
  • + {archives%archiveentry}
  • help
  • diff --git a/mercurial/templates/monoblue/manifest.tmpl b/mercurial/templates/monoblue/manifest.tmpl --- a/mercurial/templates/monoblue/manifest.tmpl +++ b/mercurial/templates/monoblue/manifest.tmpl @@ -33,13 +33,7 @@

    {path|escape} {alltags}

    - - - - - - - + {ifeq(path, up, '', updirentry)} {dentries%direntry} {fentries%fileentry}
    drwxr-xr-x[up]
    diff --git a/mercurial/templates/monoblue/map b/mercurial/templates/monoblue/map --- a/mercurial/templates/monoblue/map +++ b/mercurial/templates/monoblue/map @@ -59,6 +59,16 @@ fileellipses = '...' changelogentry = changelogentry.tmpl changeset = changeset.tmpl manifest = manifest.tmpl +updirentry = ' + + drwxr-xr-x + + + + [up] + +   + ' direntry = ' drwxr-xr-x diff --git a/mercurial/templates/paper/graph.tmpl b/mercurial/templates/paper/graph.tmpl --- a/mercurial/templates/paper/graph.tmpl +++ b/mercurial/templates/paper/graph.tmpl @@ -25,6 +25,9 @@
  • browse
    • +{archives%archiveentry} +
    +