The requested changes are too big and content was truncated.
@@ -0,0 +1,79 @@
PYTHONVER=2.7.6
PYTHONNAME=python-
PREFIX=$(HOME)/bin/prefix-$(PYTHONNAME)$(PYTHONVER)
SYMLINKDIR=$(HOME)/bin

help:
	@echo
	@echo 'Make a custom installation of a Python version'
	@echo
	@echo 'Common make parameters:'
	@echo '  PYTHONVER=...    [$(PYTHONVER)]'
	@echo '  PREFIX=...       [$(PREFIX)]'
	@echo '  SYMLINKDIR=...   [$(SYMLINKDIR) creating $(PYTHONNAME)$(PYTHONVER)]'
	@echo
	@echo 'Common make targets:'
	@echo '  python  - install Python $$PYTHONVER in $$PREFIX'
	@echo '  symlink - create a $$SYMLINKDIR/$(PYTHONNAME)$$PYTHONVER symlink'
	@echo
	@echo 'Example: create a temporary Python installation:'
	@echo '  $$ make -f Makefile.python python PYTHONVER=2.4 PREFIX=/tmp/p24'
	@echo '  $$ /tmp/p24/bin/python -V'
	@echo '  Python 2.4'
	@echo
	@echo 'Some external libraries are required for building Python: zlib bzip2 openssl.'
	@echo 'Make sure their development packages are installed systemwide.'
# fedora: yum install zlib-devel bzip2-devel openssl-devel
# debian: apt-get install zlib1g-dev libbz2-dev libssl-dev
	@echo
	@echo 'To build a nice collection of interesting Python versions:'
	@echo '  $$ for v in 2.{4{,.2,.3},5{,.6},6{,.1,.2,.9},7{,.6}}; do'
	@echo '    make -f Makefile.python symlink PYTHONVER=$$v || break; done'
	@echo 'To run a Mercurial test on all these Python versions:'
	@echo '  $$ for py in `cd ~/bin && ls $(PYTHONNAME)2.*`; do'
	@echo '    echo $$py; $$py run-tests.py test-http.t; echo; done'
	@echo

export LANGUAGE=C
export LC_ALL=C

python: $(PREFIX)/bin/python docutils
	printf 'import sys, zlib, bz2, docutils\nif sys.version_info >= (2,6):\n import ssl' | $(PREFIX)/bin/python

PYTHON_SRCDIR=Python-$(PYTHONVER)
PYTHON_SRCFILE=$(PYTHON_SRCDIR).tgz

$(PREFIX)/bin/python:
	[ -f $(PYTHON_SRCFILE) ] || wget http://www.python.org/ftp/python/$(PYTHONVER)/$(PYTHON_SRCFILE) || [ -f $(PYTHON_SRCFILE) ]
	rm -rf $(PYTHON_SRCDIR)
	tar xf $(PYTHON_SRCFILE)
# Ubuntu disables SSLv2 the hard way, disable it on old Pythons too
	-sed -i 's,self.*SSLv2_method(),0;//\0,g' $(PYTHON_SRCDIR)/Modules/_ssl.c
# Find multiarch system libraries on Ubuntu with Python 2.4.x
# http://lipyrary.blogspot.dk/2011/05/how-to-compile-python-on-ubuntu-1104.html
	-sed -i "s|lib_dirs = .* \[|\0'/usr/lib/`dpkg-architecture -qDEB_HOST_MULTIARCH`',|g" $(PYTHON_SRCDIR)/setup.py
# Find multiarch system libraries on Ubuntu and disable fortify error when setting argv
	LDFLAGS="-L/usr/lib/`dpkg-architecture -qDEB_HOST_MULTIARCH`"; \
	BASECFLAGS=-U_FORTIFY_SOURCE; \
	export LDFLAGS BASECFLAGS; \
	cd $(PYTHON_SRCDIR) && ./configure --prefix=$(PREFIX) && make all SVNVERSION=pwd && make install
	printf 'import sys, zlib, bz2\nif sys.version_info >= (2,6):\n import ssl' | $(PREFIX)/bin/python
	rm -rf $(PYTHON_SRCDIR)

DOCUTILSVER=0.11
DOCUTILS_SRCDIR=docutils-$(DOCUTILSVER)
DOCUTILS_SRCFILE=$(DOCUTILS_SRCDIR).tar.gz

docutils: $(PREFIX)/bin/python
	@$(PREFIX)/bin/python -c 'import docutils' || ( set -ex; \
	  [ -f $(DOCUTILS_SRCFILE) ] || wget http://downloads.sourceforge.net/project/docutils/docutils/$(DOCUTILSVER)/$(DOCUTILS_SRCFILE) || [ -f $(DOCUTILS_SRCFILE) ]; \
	  rm -rf $(DOCUTILS_SRCDIR); \
	  tar xf $(DOCUTILS_SRCFILE); \
	  cd $(DOCUTILS_SRCDIR) && $(PREFIX)/bin/python setup.py install --prefix=$(PREFIX); \
	  $(PREFIX)/bin/python -c 'import docutils'; \
	  rm -rf $(DOCUTILS_SRCDIR); )

symlink: python $(SYMLINKDIR)
	ln -sf $(PREFIX)/bin/python $(SYMLINKDIR)/$(PYTHONNAME)$(PYTHONVER)

.PHONY: help python docutils symlink
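For clarity, the module check that the `python` target pipes into the freshly built interpreter expands to the small script below; the comments are added here for explanation and are not part of the Makefile. It simply fails if any module that depends on the required development packages is missing:

    import sys
    import zlib       # needs zlib development headers at build time
    import bz2        # needs bzip2 development headers
    import docutils   # installed by the Makefile's 'docutils' target
    if sys.version_info >= (2, 6):
        import ssl    # needs openssl development headers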
@@ -0,0 +1,100 @@
#!/usr/bin/env python
#
# hgperf - measure performance of Mercurial commands
#
# Copyright 2014 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''measure performance of Mercurial commands

Using ``hgperf`` instead of ``hg`` measures performance of the target
Mercurial command. For example, the execution below measures
performance of :hg:`heads --topo`::

    $ hgperf heads --topo

All command output via ``ui`` is suppressed, and only the measurement
result is displayed: see also the "perf" extension in "contrib".

Costs of processing before dispatching to the command function, like
the steps below, are not measured::

    - parsing the command line (e.g. option validity check)
    - reading configuration files

But ``pre-`` and ``post-`` hook invocation for the target command is
measured, even though these are invoked before or after dispatching to
the command function, because they may be required to repeat
execution of the target command correctly.
'''

import os
import sys

libdir = '@LIBDIR@'

if libdir != '@' 'LIBDIR' '@':
    if not os.path.isabs(libdir):
        libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              libdir)
        libdir = os.path.abspath(libdir)
    sys.path.insert(0, libdir)

# enable importing on demand to reduce startup time
try:
    from mercurial import demandimport; demandimport.enable()
except ImportError:
    sys.stderr.write("abort: couldn't find mercurial libraries in [%s]\n" %
                     ' '.join(sys.path))
    sys.stderr.write("(check your install and PYTHONPATH)\n")
    sys.exit(-1)

import mercurial.util
import mercurial.dispatch

import time

def timer(func, title=None):
    results = []
    begin = time.time()
    count = 0
    while True:
        ostart = os.times()
        cstart = time.time()
        r = func()
        cstop = time.time()
        ostop = os.times()
        count += 1
        a, b = ostart, ostop
        results.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break
    if title:
        sys.stderr.write("! %s\n" % title)
    if r:
        sys.stderr.write("! result: %s\n" % r)
    m = min(results)
    sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
                     % (m[0], m[1] + m[2], m[1], m[2], count))

orgruncommand = mercurial.dispatch.runcommand

def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
    ui.pushbuffer()
    lui.pushbuffer()
    timer(lambda: orgruncommand(lui, repo, cmd, fullargs, ui,
                                options, d, cmdpats, cmdoptions))
    ui.popbuffer()
    lui.popbuffer()

mercurial.dispatch.runcommand = runcommand

for fp in (sys.stdin, sys.stdout, sys.stderr):
    mercurial.util.setbinary(fp)

mercurial.dispatch.run()
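The measurement loop in `timer()` can be exercised outside Mercurial. Below is a minimal standalone sketch of the same strategy, repeat until enough wall time has elapsed, then report the best run, using a toy `fib()` workload in place of a command dispatch (the names here are illustrative and not part of hgperf):

    import sys
    import time

    def fib(n):
        # toy workload standing in for a Mercurial command dispatch
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    def bestof(func):
        # mirror timer() above: at least 100 runs within ~3 seconds, or
        # at least 3 runs within ~10 seconds, then report the fastest
        results = []
        begin = time.time()
        count = 0
        while True:
            start = time.time()
            func()
            stop = time.time()
            results.append(stop - start)
            count += 1
            if stop - begin > 3 and count >= 100:
                break
            if stop - begin > 10 and count >= 3:
                break
        sys.stderr.write("! wall %f (best of %d)\n" % (min(results), count))

    bestof(lambda: fib(18))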
@@ -0,0 +1,125 @@
#!/usr/bin/env python

# Measure the performance of a list of revsets against multiple revisions
# defined by a parameter. Check out the revisions one by one and run
# perfrevset with every revset in the list to benchmark its performance.
#
# - The first argument is a revset of Mercurial's own repo to run against.
# - The second argument is the file from which the revset array will be taken.
#   If the second argument is omitted, read it from standard input.
#
# You should run this from the root of your mercurial repository.
#
# This script also does one run of the current version of mercurial installed
# to compare performance.

import sys
from subprocess import check_call, Popen, CalledProcessError, STDOUT, PIPE

def check_output(*args, **kwargs):
    kwargs.setdefault('stderr', PIPE)
    kwargs.setdefault('stdout', PIPE)
    proc = Popen(*args, **kwargs)
    output, error = proc.communicate()
    if proc.returncode != 0:
        raise CalledProcessError(proc.returncode, ' '.join(args[0]))
    return output

def update(rev):
    """update the repo to a revision"""
    try:
        check_call(['hg', 'update', '--quiet', '--check', str(rev)])
    except CalledProcessError, exc:
        print >> sys.stderr, 'update to revision %s failed, aborting' % rev
        sys.exit(exc.returncode)

def perf(revset):
    """run benchmark for this very revset"""
    try:
        output = check_output(['./hg',
                               '--config',
                               'extensions.perf=contrib/perf.py',
                               'perfrevset',
                               revset],
                              stderr=STDOUT)
        output = output.lstrip('!')  # remove useless ! in this context
        return output.strip()
    except CalledProcessError, exc:
        print >> sys.stderr, 'abort: cannot run revset benchmark'
        sys.exit(exc.returncode)

def printrevision(rev):
    """print data about a revision"""
    sys.stdout.write("Revision: ")
    sys.stdout.flush()
    check_call(['hg', 'log', '--rev', str(rev), '--template',
                '{desc|firstline}\n'])

def getrevs(spec):
    """get the list of revs matched by a revset"""
    try:
        out = check_output(['hg', 'log', '--template={rev}\n', '--rev', spec])
    except CalledProcessError, exc:
        print >> sys.stderr, "abort, can't get revision from %s" % spec
        sys.exit(exc.returncode)
    return [r for r in out.split() if r]


target_rev = sys.argv[1]

revsetsfile = sys.stdin
if len(sys.argv) > 2:
    revsetsfile = open(sys.argv[2])

revsets = [l.strip() for l in revsetsfile]

print "Revsets to benchmark"
print "----------------------------"

for idx, rset in enumerate(revsets):
    print "%i) %s" % (idx, rset)

print "----------------------------"
print

revs = getrevs(target_rev)

results = []
for r in revs:
    print "----------------------------"
    printrevision(r)
    print "----------------------------"
    update(r)
    res = []
    results.append(res)
    for idx, rset in enumerate(revsets):
        data = perf(rset)
        res.append(data)
        print "%i)" % idx, data
        sys.stdout.flush()
    print "----------------------------"


print """

Result by revset
================
"""

print 'Revision:', revs
for idx, rev in enumerate(revs):
    sys.stdout.write('%i) ' % idx)
    sys.stdout.flush()
    printrevision(rev)


for ridx, rset in enumerate(revsets):

    print "revset #%i: %s" % (ridx, rset)
    for idx, data in enumerate(results):
        print '%i) %s' % (idx, data[ridx])
    print
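Note the shape of `results` built by the benchmark loop: one row per benchmarked revision and one column per revset, so the final per-revset report reads `data[ridx]` down the rows. A toy sketch of that traversal with made-up timings (the data here is fabricated purely to show the indexing):

    # stand-ins for the variables built by the loop above
    revsets = ['all()', 'draft()']
    results = [['wall 0.010', 'wall 0.002'],   # revision 0
               ['wall 0.012', 'wall 0.003']]   # revision 1

    # same traversal as the "revset #N" report: one block per revset,
    # one line per benchmarked revision
    for ridx, rset in enumerate(revsets):
        print "revset #%i: %s" % (ridx, rset)
        for idx, data in enumerate(results):
            print '%i) %s' % (idx, data[ridx])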
@@ -0,0 +1,16 @@
all()
draft()
::tip
draft() and ::tip
0::tip
roots(0::tip)
author(lmoscovicz)
author(mpm)
author(lmoscovicz) or author(mpm)
tip:0
max(tip:0)
min(0:tip)
0::
min(0::)
roots((tip~100::) - (tip~100::tip))
::p1(p1(tip))::
@@ -0,0 +1,739 @@
# bundle2.py - generic container format to transmit arbitrary data.
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Handling of the new bundle2 format

The goal of bundle2 is to act as an atomic packet to transmit a set of
payloads in an application agnostic way. It consists of a sequence of "parts"
that will be handed to and processed by the application layer.


General format architecture
===========================

The format is structured as follows

- magic string
- stream level parameters
- payload parts (any number)
- end of stream marker.

The binary format
============================

All numbers are unsigned and big-endian.

stream level parameters
------------------------

The binary format is as follows

:params size: (16 bits integer)

  The total number of bytes used by the parameters

:params value: arbitrary number of bytes

  A blob of `params size` containing the serialized version of all stream
  level parameters.

  The blob contains a space separated list of parameters. Parameters with a
  value are stored in the form `<name>=<value>`. Both name and value are
  urlquoted.

  Empty names are forbidden.

  Names MUST start with a letter. If this first letter is lower case, the
  parameter is advisory and can be safely ignored. However, when the first
  letter is upper case, the parameter is mandatory and the bundling process
  MUST stop if it is not able to process it.

  Stream parameters use a simple textual format for two main reasons:

  - Stream level parameters should remain simple and we want to discourage any
    crazy usage.
  - Textual data allow easy human inspection of a bundle2 header in case of
    troubles.

  Any application level options MUST go into a bundle2 part instead.

Payload part
------------------------

The binary format is as follows

:header size: (16 bits integer)

  The total number of bytes used by the part headers. When the header is empty
  (size = 0) this is interpreted as the end of stream marker.

:header:

    The header defines how to interpret the part. It contains two pieces of
    data: the part type, and the part parameters.

    The part type is used to route to the application level handler that can
    interpret the payload.

    Part parameters are passed to the application level handler. They are
    meant to convey information that will help the application level object
    interpret the part payload.

    The binary format of the header is as follows

    :typesize: (one byte)

    :parttype: alphanumerical part name

    :partid: A 32 bits integer (unique in the bundle) that can be used to
             refer to this part.

    :parameters:

        Part's parameters may have arbitrary content, the binary structure
        is::

            <mandatory-count><advisory-count><param-sizes><param-data>

        :mandatory-count: 1 byte, number of mandatory parameters

        :advisory-count: 1 byte, number of advisory parameters

        :param-sizes:

            N couples of bytes, where N is the total number of parameters.
            Each couple contains (<size-of-key>, <size-of-value>) for one
            parameter.

        :param-data:

            A blob of bytes from which each parameter key and value can be
            retrieved using the list of size couples stored in the previous
            field.

            Mandatory parameters come first, then the advisory ones.

:payload:

    The payload is a series of `<chunksize><chunkdata>`.

    `chunksize` is a 32 bits integer, `chunkdata` are plain bytes (as much as
    `chunksize` says). The payload part is concluded by a zero size chunk.

    The current implementation always produces either zero or one chunk.
    This is an implementation limitation that will ultimately be lifted.

Bundle processing
============================

Each part is processed in order using a "part handler". Handlers are
registered for a certain part type.

The matching of a part to its handler is case insensitive. The case of the
part type is used to know if a part is mandatory or advisory. If the part type
contains any uppercase char it is considered mandatory. When no handler is
known for a mandatory part, the process is aborted and an exception is raised.
If the part is advisory and no handler is known, the part is ignored. When the
process is aborted, the full bundle is still read from the stream to keep the
channel usable. But none of the parts read after an abort are processed. In
the future, dropping the stream may become an option for channels we do not
care to preserve.
"""

import util
import struct
import urllib
import string

import changegroup
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_magicstring = 'HG2X'

_fstreamparamsize = '>H'
_fpartheadersize = '>H'
_fparttypesize = '>B'
_fpartid = '>I'
_fpayloadsize = '>I'
_fpartparamcount = '>BB'

preferedchunksize = 4096

def _makefpartparamsizes(nbparams):
    """return a struct format to read part parameter sizes

    The number of parameters is variable so we need to build that format
    dynamically.
    """
    return '>' + ('BB' * nbparams)

parthandlermapping = {}

def parthandler(parttype):
    """decorator that registers a function as a bundle2 part handler

    eg::

        @parthandler('myparttype')
        def myparttypehandler(...):
            '''process a part of type "my part".'''
            ...
    """
    def _decorator(func):
        lparttype = parttype.lower()  # enforce lower case matching.
        assert lparttype not in parthandlermapping
        parthandlermapping[lparttype] = func
        return func
    return _decorator

class unbundlerecords(object):
    """keep record of what happens during an unbundle

    New records are added using `records.add('cat', obj)`. Where 'cat' is a
    category of record and obj is an arbitrary object.

    `records['cat']` will return all entries of this category 'cat'.

    Iterating on the object itself will yield `('category', obj)` tuples
    for all entries.

    All iterations happen in chronological order.
    """

    def __init__(self):
        self._categories = {}
        self._sequences = []
        self._replies = {}

    def add(self, category, entry, inreplyto=None):
        """add a new record of a given category.

        The entry can then be retrieved in the list returned by
        self['category']."""
        self._categories.setdefault(category, []).append(entry)
        self._sequences.append((category, entry))
        if inreplyto is not None:
            self.getreplies(inreplyto).add(category, entry)

    def getreplies(self, partid):
        """get the subrecords that reply to a specific part"""
        return self._replies.setdefault(partid, unbundlerecords())

    def __getitem__(self, cat):
        return tuple(self._categories.get(cat, ()))

    def __iter__(self):
        return iter(self._sequences)

    def __len__(self):
        return len(self._sequences)

    def __nonzero__(self):
        return bool(self._sequences)

class bundleoperation(object):
    """an object that represents a single bundling process

    Its purpose is to carry unbundle-related objects and states.

    A new object should be created at the beginning of each bundle processing.
    The object is to be returned by the processing function.

    The object has very little content now; it will ultimately contain:
    * an access to the repo the bundle is applied to,
    * a ui object,
    * a way to retrieve a transaction to add changes to the repo,
    * a way to record the result of processing each part,
    * a way to construct a bundle response when applicable.
    """

    def __init__(self, repo, transactiongetter):
        self.repo = repo
        self.ui = repo.ui
        self.records = unbundlerecords()
        self.gettransaction = transactiongetter
        self.reply = None

class TransactionUnavailable(RuntimeError):
    pass

def _notransaction():
    """default method to get a transaction while processing a bundle

    Raise an exception to highlight the fact that no transaction was expected
    to be created"""
    raise TransactionUnavailable()

def processbundle(repo, unbundler, transactiongetter=_notransaction):
    """This function processes a bundle, applying its effects to/from a repo

    It iterates over each part then searches for and uses the proper handling
    code to process the part. Parts are processed in order.

    This is a very early version of this function that will be strongly
    reworked before final usage.

    An unknown mandatory part will abort the process.
    """
    op = bundleoperation(repo, transactiongetter)
    # todo:
    # - replace this with an init function soon.
    # - exception catching
    unbundler.params
    iterparts = unbundler.iterparts()
    part = None
    try:
        for part in iterparts:
            parttype = part.type
            # part keys are matched lower case
            key = parttype.lower()
            try:
                handler = parthandlermapping[key]
                op.ui.debug('found a handler for part %r\n' % parttype)
            except KeyError:
                if key != parttype:  # mandatory parts
                    # todo:
                    # - use a more precise exception
                    raise
                op.ui.debug('ignoring unknown advisory part %r\n' % key)
                # consuming the part
                part.read()
                continue

            # handler is called outside the above try block so that we don't
            # risk catching KeyErrors from anything other than the
            # parthandlermapping lookup (any KeyError raised by handler()
            # itself represents a defect of a different variety).
            output = None
            if op.reply is not None:
                op.ui.pushbuffer(error=True)
                output = ''
            try:
                handler(op, part)
            finally:
                if output is not None:
                    output = op.ui.popbuffer()
            if output:
                outpart = bundlepart('b2x:output',
                                     advisoryparams=[('in-reply-to',
                                                      str(part.id))],
                                     data=output)
                op.reply.addpart(outpart)
            part.read()
    except Exception:
        if part is not None:
            # consume the bundle content
            part.read()
        for part in iterparts:
            # consume the bundle content
            part.read()
        raise
    return op

def decodecaps(blob):
    """decode a bundle2 caps bytes blob into a dictionary

    The blob is a list of capabilities (one per line)
    Capabilities may have values using a line of the form::

        capability=value1,value2,value3

    The values are always a list."""
    caps = {}
    for line in blob.splitlines():
        if not line:
            continue
        if '=' not in line:
            key, vals = line, ()
        else:
            key, vals = line.split('=', 1)
            vals = vals.split(',')
        key = urllib.unquote(key)
        vals = [urllib.unquote(v) for v in vals]
        caps[key] = vals
    return caps

def encodecaps(caps):
    """encode a bundle2 caps dictionary into a bytes blob"""
    chunks = []
    for ca in sorted(caps):
        vals = caps[ca]
        ca = urllib.quote(ca)
        vals = [urllib.quote(v) for v in vals]
        if vals:
            ca = "%s=%s" % (ca, ','.join(vals))
        chunks.append(ca)
    return '\n'.join(chunks)

class bundle20(object):
    """represent an outgoing bundle2 container

    Use the `addparam` method to add stream level parameters and `addpart` to
    populate it. Then call `getchunks` to retrieve all the binary chunks of
    data that compose the bundle2 container."""

    def __init__(self, ui, capabilities=()):
        self.ui = ui
        self._params = []
        self._parts = []
        self.capabilities = dict(capabilities)

    def addparam(self, name, value=None):
        """add a stream level parameter"""
        if not name:
            raise ValueError('empty parameter name')
        if name[0] not in string.letters:
            raise ValueError('non letter first character: %r' % name)
        self._params.append((name, value))

    def addpart(self, part):
        """add a new part to the bundle2 container

        Parts contain the actual application payload."""
        assert part.id is None
        part.id = len(self._parts)  # very cheap counter
        self._parts.append(part)

    def getchunks(self):
        self.ui.debug('start emission of %s stream\n' % _magicstring)
        yield _magicstring
        param = self._paramchunk()
        self.ui.debug('bundle parameter: %s\n' % param)
        yield _pack(_fstreamparamsize, len(param))
        if param:
            yield param

        self.ui.debug('start of parts\n')
        for part in self._parts:
            self.ui.debug('bundle part: "%s"\n' % part.type)
            for chunk in part.getchunks():
                yield chunk
        self.ui.debug('end of bundle\n')
        yield '\0\0'

    def _paramchunk(self):
        """return an encoded version of all stream parameters"""
        blocks = []
        for par, value in self._params:
            par = urllib.quote(par)
            if value is not None:
                value = urllib.quote(value)
                par = '%s=%s' % (par, value)
            blocks.append(par)
        return ' '.join(blocks)

class unpackermixin(object):
    """A mixin to extract bytes and struct data from a stream"""

    def __init__(self, fp):
        self._fp = fp

    def _unpack(self, format):
        """unpack this struct format from the stream"""
        data = self._readexact(struct.calcsize(format))
        return _unpack(format, data)

    def _readexact(self, size):
        """read exactly <size> bytes from the stream"""
        return changegroup.readexactly(self._fp, size)


class unbundle20(unpackermixin):
    """interpret a bundle2 stream

    This class is fed with a binary stream and yields parts through its
    `iterparts` method."""

    def __init__(self, ui, fp, header=None):
        """If header is specified, we do not read it out of the stream."""
        self.ui = ui
        super(unbundle20, self).__init__(fp)
        if header is None:
            header = self._readexact(4)
            magic, version = header[0:2], header[2:4]
            if magic != 'HG':
                raise util.Abort(_('not a Mercurial bundle'))
            if version != '2X':
                raise util.Abort(_('unknown bundle version %s') % version)
        self.ui.debug('start processing of %s stream\n' % header)

    @util.propertycache
    def params(self):
        """dictionary of stream level parameters"""
        self.ui.debug('reading bundle2 stream parameters\n')
        params = {}
        paramssize = self._unpack(_fstreamparamsize)[0]
        if paramssize:
            for p in self._readexact(paramssize).split(' '):
                p = p.split('=', 1)
                p = [urllib.unquote(i) for i in p]
                if len(p) < 2:
                    p.append(None)
                self._processparam(*p)
                params[p[0]] = p[1]
        return params

    def _processparam(self, name, value):
        """process a parameter, applying its effect if needed

        Parameters starting with a lower case letter are advisory and will be
        ignored when unknown. Those starting with an upper case letter are
        mandatory, and this function will raise a KeyError when they are
        unknown.

        Note: no options are currently supported. Any input will either be
        ignored or fail.
        """
        if not name:
            raise ValueError('empty parameter name')
        if name[0] not in string.letters:
            raise ValueError('non letter first character: %r' % name)
        # Some logic will be added here later to try to process the option
        # against a dict of known parameters.
        if name[0].islower():
            self.ui.debug("ignoring unknown parameter %r\n" % name)
        else:
            raise KeyError(name)

    def iterparts(self):
        """yield all parts contained in the stream"""
        # make sure params have been loaded
        self.params
        self.ui.debug('start extraction of bundle2 parts\n')
        headerblock = self._readpartheader()
        while headerblock is not None:
            part = unbundlepart(self.ui, headerblock, self._fp)
            yield part
            headerblock = self._readpartheader()
        self.ui.debug('end of bundle2 stream\n')

    def _readpartheader(self):
        """reads a part header size and returns the bytes blob

        returns None if empty"""
        headersize = self._unpack(_fpartheadersize)[0]
        self.ui.debug('part header size: %i\n' % headersize)
        if headersize:
            return self._readexact(headersize)
        return None


class bundlepart(object):
    """A bundle2 part contains application level payload

    The part `type` is used to route the part to the application level
    handler.
    """

    def __init__(self, parttype, mandatoryparams=(), advisoryparams=(),
                 data=''):
        self.id = None
        self.type = parttype
        self.data = data
        self.mandatoryparams = mandatoryparams
        self.advisoryparams = advisoryparams

    def getchunks(self):
        #### header
        ## parttype
        header = [_pack(_fparttypesize, len(self.type)),
                  self.type, _pack(_fpartid, self.id),
                 ]
        ## parameters
        # count
        manpar = self.mandatoryparams
        advpar = self.advisoryparams
        header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
        # size
        parsizes = []
        for key, value in manpar:
            parsizes.append(len(key))
            parsizes.append(len(value))
        for key, value in advpar:
            parsizes.append(len(key))
            parsizes.append(len(value))
        paramsizes = _pack(_makefpartparamsizes(len(parsizes) / 2), *parsizes)
        header.append(paramsizes)
        # key, value
        for key, value in manpar:
            header.append(key)
            header.append(value)
        for key, value in advpar:
            header.append(key)
            header.append(value)
        ## finalize header
        headerchunk = ''.join(header)
        yield _pack(_fpartheadersize, len(headerchunk))
        yield headerchunk
        ## payload
        for chunk in self._payloadchunks():
            yield _pack(_fpayloadsize, len(chunk))
            yield chunk
        # end of payload
        yield _pack(_fpayloadsize, 0)

    def _payloadchunks(self):
        """yield chunks of the part payload

        Exists to handle the different methods used to provide data to a
        part."""
        # we only support fixed size data now.
        # This will be improved in the future.
        if util.safehasattr(self.data, 'next'):
            buff = util.chunkbuffer(self.data)
            chunk = buff.read(preferedchunksize)
            while chunk:
                yield chunk
                chunk = buff.read(preferedchunksize)
        elif len(self.data):
            yield self.data

class unbundlepart(unpackermixin):
    """a bundle part read from a bundle"""

    def __init__(self, ui, header, fp):
        super(unbundlepart, self).__init__(fp)
        self.ui = ui
        # unbundle state attr
        self._headerdata = header
        self._headeroffset = 0
        self._initialized = False
        self.consumed = False
        # part data
        self.id = None
        self.type = None
        self.mandatoryparams = None
        self.advisoryparams = None
        self._payloadstream = None
        self._readheader()

    def _fromheader(self, size):
        """return the next <size> bytes from the header"""
        offset = self._headeroffset
        data = self._headerdata[offset:(offset + size)]
        self._headeroffset = offset + size
        return data

    def _unpackheader(self, format):
        """read given format from header

        This automatically computes the size of the format to read."""
        data = self._fromheader(struct.calcsize(format))
        return _unpack(format, data)

    def _readheader(self):
        """read the header and setup the object"""
        typesize = self._unpackheader(_fparttypesize)[0]
        self.type = self._fromheader(typesize)
        self.ui.debug('part type: "%s"\n' % self.type)
        self.id = self._unpackheader(_fpartid)[0]
        self.ui.debug('part id: "%s"\n' % self.id)
        ## reading parameters
        # param count
        mancount, advcount = self._unpackheader(_fpartparamcount)
        self.ui.debug('part parameters: %i\n' % (mancount + advcount))
        # param size
        fparamsizes = _makefpartparamsizes(mancount + advcount)
        paramsizes = self._unpackheader(fparamsizes)
        # make it a list of couples again
        paramsizes = zip(paramsizes[::2], paramsizes[1::2])
        # split mandatory from advisory
        mansizes = paramsizes[:mancount]
        advsizes = paramsizes[mancount:]
        # retrieve param values
        manparams = []
        for key, value in mansizes:
            manparams.append((self._fromheader(key), self._fromheader(value)))
        advparams = []
        for key, value in advsizes:
            advparams.append((self._fromheader(key), self._fromheader(value)))
        self.mandatoryparams = manparams
        self.advisoryparams = advparams
        ## part payload
        def payloadchunks():
            payloadsize = self._unpack(_fpayloadsize)[0]
            self.ui.debug('payload chunk size: %i\n' % payloadsize)
            while payloadsize:
                yield self._readexact(payloadsize)
                payloadsize = self._unpack(_fpayloadsize)[0]
                self.ui.debug('payload chunk size: %i\n' % payloadsize)
        self._payloadstream = util.chunkbuffer(payloadchunks())
        # we read the data, tell it
        self._initialized = True

    def read(self, size=None):
        """read payload data"""
        if not self._initialized:
            self._readheader()
        if size is None:
            data = self._payloadstream.read()
        else:
            data = self._payloadstream.read(size)
        if size is None or len(data) < size:
            self.consumed = True
        return data


@parthandler('b2x:changegroup')
def handlechangegroup(op, inpart):
    """apply a changegroup part on the repo

    This is a very early implementation that will see massive rework before
    being inflicted on any end user.
    """
    # Make sure we trigger a transaction creation
    #
    # The addchangegroup function will get a transaction object by itself, but
    # we need to make sure we trigger the creation of a transaction object
    # used for the whole processing scope.
    op.gettransaction()
    cg = changegroup.unbundle10(inpart, 'UN')
    ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
    op.records.add('changegroup', {'return': ret})
    if op.reply is not None:
        # This is definitely not the final form of this
        # return. But one needs to start somewhere.
        part = bundlepart('b2x:reply:changegroup', (),
                          [('in-reply-to', str(inpart.id)),
                           ('return', '%i' % ret)])
        op.reply.addpart(part)
    assert not inpart.read()

@parthandler('b2x:reply:changegroup')
def handlereplychangegroup(op, inpart):
    p = dict(inpart.advisoryparams)
    ret = int(p['return'])
    op.records.add('changegroup', {'return': ret}, int(p['in-reply-to']))

@parthandler('b2x:check:heads')
def handlecheckheads(op, inpart):
    """check that the heads of the repo did not change

    This is used to detect a push race when using unbundle.
    This replaces the "heads" argument of unbundle."""
    h = inpart.read(20)
    heads = []
    while len(h) == 20:
        heads.append(h)
        h = inpart.read(20)
    assert not h
    if heads != op.repo.heads():
        # delayed import to avoid an import cycle (exchange imports bundle2)
        import exchange
        raise exchange.PushRaced()

@parthandler('b2x:output')
def handleoutput(op, inpart):
    """forward output captured on the server to the client"""
    for line in inpart.read().splitlines():
        op.ui.write(('remote: %s\n' % line))

@parthandler('b2x:replycaps')
def handlereplycaps(op, inpart):
    """Notify that a reply bundle should be created

    The payload contains the capabilities information for the reply"""
    caps = decodecaps(inpart.read())
    if op.reply is None:
        op.reply = bundle20(op.ui, caps)
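The stream-level framing documented in the module docstring can be checked by hand with nothing but `struct` and `urllib`. Below is a minimal sketch (Python 2, to match the module; the `evolution` parameter name is purely illustrative) that emits the magic string, the parameter block, and the end-of-stream marker, then reads them back:

    import struct
    import urllib

    # emit: magic, 16-bit size of the urlquoted parameter blob, the blob
    # itself, then an empty part-header size ('\0\0') as end of stream
    params = 'evolution=%s' % urllib.quote('true')
    stream = 'HG2X'
    stream += struct.pack('>H', len(params)) + params
    stream += struct.pack('>H', 0)  # zero header size = end of stream marker

    # read it back
    assert stream[:4] == 'HG2X'
    psize = struct.unpack('>H', stream[4:6])[0]
    print stream[6:6 + psize]  # -> evolution=true
    assert struct.unpack('>H', stream[6 + psize:8 + psize])[0] == 0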
@@ -0,0 +1,757 b'' | |||||
|
1 | # exchange.py - utility to exchange data between repos. | |||
|
2 | # | |||
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | ||||
|
8 | from i18n import _ | |||
|
9 | from node import hex, nullid | |||
|
10 | import errno, urllib | |||
|
11 | import util, scmutil, changegroup, base85 | |||
|
12 | import discovery, phases, obsolete, bookmarks, bundle2 | |||
|
13 | ||||
|
14 | def readbundle(ui, fh, fname, vfs=None): | |||
|
15 | header = changegroup.readexactly(fh, 4) | |||
|
16 | ||||
|
17 | alg = None | |||
|
18 | if not fname: | |||
|
19 | fname = "stream" | |||
|
20 | if not header.startswith('HG') and header.startswith('\0'): | |||
|
21 | fh = changegroup.headerlessfixup(fh, header) | |||
|
22 | header = "HG10" | |||
|
23 | alg = 'UN' | |||
|
24 | elif vfs: | |||
|
25 | fname = vfs.join(fname) | |||
|
26 | ||||
|
27 | magic, version = header[0:2], header[2:4] | |||
|
28 | ||||
|
29 | if magic != 'HG': | |||
|
30 | raise util.Abort(_('%s: not a Mercurial bundle') % fname) | |||
|
31 | if version == '10': | |||
|
32 | if alg is None: | |||
|
33 | alg = changegroup.readexactly(fh, 2) | |||
|
34 | return changegroup.unbundle10(fh, alg) | |||
|
35 | elif version == '2X': | |||
|
36 | return bundle2.unbundle20(ui, fh, header=magic + version) | |||
|
37 | else: | |||
|
38 | raise util.Abort(_('%s: unknown bundle version %s') % (fname, version)) | |||
|
39 | ||||
|
40 | ||||
|
41 | class pushoperation(object): | |||
|
42 | """A object that represent a single push operation | |||
|
43 | ||||
|
44 | It purpose is to carry push related state and very common operation. | |||
|
45 | ||||
|
46 | A new should be created at the beginning of each push and discarded | |||
|
47 | afterward. | |||
|
48 | """ | |||
|
49 | ||||
|
50 | def __init__(self, repo, remote, force=False, revs=None, newbranch=False): | |||
|
51 | # repo we push from | |||
|
52 | self.repo = repo | |||
|
53 | self.ui = repo.ui | |||
|
54 | # repo we push to | |||
|
55 | self.remote = remote | |||
|
56 | # force option provided | |||
|
57 | self.force = force | |||
|
58 | # revs to be pushed (None is "all") | |||
|
59 | self.revs = revs | |||
|
60 | # allow push of new branch | |||
|
61 | self.newbranch = newbranch | |||
|
62 | # did a local lock get acquired? | |||
|
63 | self.locallocked = None | |||
|
64 | # Integer version of the push result | |||
|
65 | # - None means nothing to push | |||
|
66 | # - 0 means HTTP error | |||
|
67 | # - 1 means we pushed and remote head count is unchanged *or* | |||
|
68 | # we have outgoing changesets but refused to push | |||
|
69 | # - other values as described by addchangegroup() | |||
|
70 | self.ret = None | |||
|
71 | # discover.outgoing object (contains common and outgoing data) | |||
|
72 | self.outgoing = None | |||
|
73 | # all remote heads before the push | |||
|
74 | self.remoteheads = None | |||
|
75 | # testable as a boolean indicating if any nodes are missing locally. | |||
|
76 | self.incoming = None | |||
|
77 | # set of all heads common after changeset bundle push | |||
|
78 | self.commonheads = None | |||
|
79 | ||||
|
80 | def push(repo, remote, force=False, revs=None, newbranch=False): | |||
|
81 | '''Push outgoing changesets (limited by revs) from a local | |||
|
82 | repository to remote. Return an integer: | |||
|
83 | - None means nothing to push | |||
|
84 | - 0 means HTTP error | |||
|
85 | - 1 means we pushed and remote head count is unchanged *or* | |||
|
86 | we have outgoing changesets but refused to push | |||
|
87 | - other values as described by addchangegroup() | |||
|
88 | ''' | |||
|
89 | pushop = pushoperation(repo, remote, force, revs, newbranch) | |||
|
90 | if pushop.remote.local(): | |||
|
91 | missing = (set(pushop.repo.requirements) | |||
|
92 | - pushop.remote.local().supported) | |||
|
93 | if missing: | |||
|
94 | msg = _("required features are not" | |||
|
95 | " supported in the destination:" | |||
|
96 | " %s") % (', '.join(sorted(missing))) | |||
|
97 | raise util.Abort(msg) | |||
|
98 | ||||
|
99 | # there are two ways to push to remote repo: | |||
|
100 | # | |||
|
101 | # addchangegroup assumes local user can lock remote | |||
|
102 | # repo (local filesystem, old ssh servers). | |||
|
103 | # | |||
|
104 | # unbundle assumes local user cannot lock remote repo (new ssh | |||
|
105 | # servers, http servers). | |||
|
106 | ||||
|
107 | if not pushop.remote.canpush(): | |||
|
108 | raise util.Abort(_("destination does not support push")) | |||
|
109 | # get local lock as we might write phase data | |||
|
110 | locallock = None | |||
|
111 | try: | |||
|
112 | locallock = pushop.repo.lock() | |||
|
113 | pushop.locallocked = True | |||
|
114 | except IOError, err: | |||
|
115 | pushop.locallocked = False | |||
|
116 | if err.errno != errno.EACCES: | |||
|
117 | raise | |||
|
118 | # source repo cannot be locked. | |||
|
119 | # We do not abort the push, but just disable the local phase | |||
|
120 | # synchronisation. | |||
|
121 | msg = 'cannot lock source repository: %s\n' % err | |||
|
122 | pushop.ui.debug(msg) | |||
|
123 | try: | |||
|
124 | pushop.repo.checkpush(pushop) | |||
|
125 | lock = None | |||
|
126 | unbundle = pushop.remote.capable('unbundle') | |||
|
127 | if not unbundle: | |||
|
128 | lock = pushop.remote.lock() | |||
|
129 | try: | |||
|
130 | _pushdiscovery(pushop) | |||
|
131 | if _pushcheckoutgoing(pushop): | |||
|
132 | pushop.repo.prepushoutgoinghooks(pushop.repo, | |||
|
133 | pushop.remote, | |||
|
134 | pushop.outgoing) | |||
|
135 | if (pushop.repo.ui.configbool('experimental', 'bundle2-exp', | |||
|
136 | False) | |||
|
137 | and pushop.remote.capable('bundle2-exp')): | |||
|
138 | _pushbundle2(pushop) | |||
|
139 | else: | |||
|
140 | _pushchangeset(pushop) | |||
|
141 | _pushcomputecommonheads(pushop) | |||
|
142 | _pushsyncphase(pushop) | |||
|
143 | _pushobsolete(pushop) | |||
|
144 | finally: | |||
|
145 | if lock is not None: | |||
|
146 | lock.release() | |||
|
147 | finally: | |||
|
148 | if locallock is not None: | |||
|
149 | locallock.release() | |||
|
150 | ||||
|
151 | _pushbookmark(pushop) | |||
|
152 | return pushop.ret | |||
|
153 | ||||
|
154 | def _pushdiscovery(pushop): | |||
|
155 | # discovery | |||
|
156 | unfi = pushop.repo.unfiltered() | |||
|
157 | fci = discovery.findcommonincoming | |||
|
158 | commoninc = fci(unfi, pushop.remote, force=pushop.force) | |||
|
159 | common, inc, remoteheads = commoninc | |||
|
160 | fco = discovery.findcommonoutgoing | |||
|
161 | outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs, | |||
|
162 | commoninc=commoninc, force=pushop.force) | |||
|
163 | pushop.outgoing = outgoing | |||
|
164 | pushop.remoteheads = remoteheads | |||
|
165 | pushop.incoming = inc | |||
|
166 | ||||
|
167 | def _pushcheckoutgoing(pushop): | |||
|
168 | outgoing = pushop.outgoing | |||
|
169 | unfi = pushop.repo.unfiltered() | |||
|
170 | if not outgoing.missing: | |||
|
171 | # nothing to push | |||
|
172 | scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded) | |||
|
173 | return False | |||
|
174 | # something to push | |||
|
175 | if not pushop.force: | |||
|
176 | # if repo.obsstore == False --> no obsolete | |||
|
177 | # then, save the iteration | |||
|
178 | if unfi.obsstore: | |||
|
179 | # this message are here for 80 char limit reason | |||
|
180 | mso = _("push includes obsolete changeset: %s!") | |||
|
181 | mst = "push includes %s changeset: %s!" | |||
|
182 | # plain versions for i18n tool to detect them | |||
|
183 | _("push includes unstable changeset: %s!") | |||
|
184 | _("push includes bumped changeset: %s!") | |||
|
185 | _("push includes divergent changeset: %s!") | |||
|
186 | # If we are to push if there is at least one | |||
|
187 | # obsolete or unstable changeset in missing, at | |||
|
188 | # least one of the missinghead will be obsolete or | |||
|
189 | # unstable. So checking heads only is ok | |||
|
190 | for node in outgoing.missingheads: | |||
|
191 | ctx = unfi[node] | |||
|
192 | if ctx.obsolete(): | |||
|
193 | raise util.Abort(mso % ctx) | |||
|
194 | elif ctx.troubled(): | |||
|
195 | raise util.Abort(_(mst) | |||
|
196 | % (ctx.troubles()[0], | |||
|
197 | ctx)) | |||
|
198 | newbm = pushop.ui.configlist('bookmarks', 'pushing') | |||
|
199 | discovery.checkheads(unfi, pushop.remote, outgoing, | |||
|
200 | pushop.remoteheads, | |||
|
201 | pushop.newbranch, | |||
|
202 | bool(pushop.incoming), | |||
|
203 | newbm) | |||
|
204 | return True | |||
|
205 | ||||
|
206 | def _pushbundle2(pushop): | |||
|
207 | """push data to the remote using bundle2 | |||
|
208 | ||||
|
209 | The only currently supported type of data is changegroup but this will | |||
|
210 | evolve in the future.""" | |||
|
211 | # Send known head to the server for race detection. | |||
|
212 | capsblob = urllib.unquote(pushop.remote.capable('bundle2-exp')) | |||
|
213 | caps = bundle2.decodecaps(capsblob) | |||
|
214 | bundler = bundle2.bundle20(pushop.ui, caps) | |||
|
215 | # create reply capability | |||
|
216 | capsblob = bundle2.encodecaps(pushop.repo.bundle2caps) | |||
|
217 | bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsblob)) | |||
|
218 | if not pushop.force: | |||
|
219 | part = bundle2.bundlepart('B2X:CHECK:HEADS', | |||
|
220 | data=iter(pushop.remoteheads)) | |||
|
221 | bundler.addpart(part) | |||
|
222 | extrainfo = _pushbundle2extraparts(pushop, bundler) | |||
|
223 | # add the changegroup bundle | |||
|
224 | cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing) | |||
|
225 | cgpart = bundle2.bundlepart('B2X:CHANGEGROUP', data=cg.getchunks()) | |||
|
226 | bundler.addpart(cgpart) | |||
|
227 | stream = util.chunkbuffer(bundler.getchunks()) | |||
|
228 | reply = pushop.remote.unbundle(stream, ['force'], 'push') | |||
|
229 | try: | |||
|
230 | op = bundle2.processbundle(pushop.repo, reply) | |||
|
231 | except KeyError, exc: | |||
|
232 | raise util.Abort('missing support for %s' % exc) | |||
|
233 | cgreplies = op.records.getreplies(cgpart.id) | |||
|
234 | assert len(cgreplies['changegroup']) == 1 | |||
|
235 | pushop.ret = cgreplies['changegroup'][0]['return'] | |||
|
236 | _pushbundle2extrareply(pushop, op, extrainfo) | |||
|
237 | ||||
|
238 | def _pushbundle2extraparts(pushop, bundler): | |||
|
239 | """hook function to let extensions add parts | |||
|
240 | ||||
|
241 | Return a dict to let extensions pass data to the reply processing. | |||
|
242 | """ | |||
|
243 | return {} | |||
|
244 | ||||
|
245 | def _pushbundle2extrareply(pushop, op, extrainfo): | |||
|
246 | """hook function to let extensions react to part replies | |||
|
247 | ||||
|
248 | The dict from _pushbundle2extrareply is fed to this function. | |||
|
249 | """ | |||
|
250 | pass | |||
|
251 | ||||
|
252 | def _pushchangeset(pushop): | |||
|
253 | """Make the actual push of changeset bundle to remote repo""" | |||
|
254 | outgoing = pushop.outgoing | |||
|
255 | unbundle = pushop.remote.capable('unbundle') | |||
|
256 | # TODO: get bundlecaps from remote | |||
|
257 | bundlecaps = None | |||
|
258 | # create a changegroup from local | |||
|
259 | if pushop.revs is None and not (outgoing.excluded | |||
|
260 | or pushop.repo.changelog.filteredrevs): | |||
|
261 | # push everything, | |||
|
262 | # use the fast path, no race possible on push | |||
|
263 | bundler = changegroup.bundle10(pushop.repo, bundlecaps) | |||
|
264 | cg = changegroup.getsubset(pushop.repo, | |||
|
265 | outgoing, | |||
|
266 | bundler, | |||
|
267 | 'push', | |||
|
268 | fastpath=True) | |||
|
269 | else: | |||
|
270 | cg = changegroup.getlocalbundle(pushop.repo, 'push', outgoing, | |||
|
271 | bundlecaps) | |||
|
272 | ||||
|
273 | # apply changegroup to remote | |||
|
274 | if unbundle: | |||
|
275 | # local repo finds heads on server, finds out what | |||
|
276 | # revs it must push. once revs transferred, if server | |||
|
277 | # finds it has different heads (someone else won | |||
|
278 | # commit/push race), server aborts. | |||
|
279 | if pushop.force: | |||
|
280 | remoteheads = ['force'] | |||
|
281 | else: | |||
|
282 | remoteheads = pushop.remoteheads | |||
|
283 | # ssh: return remote's addchangegroup() | |||
|
284 | # http: return remote's addchangegroup() or 0 for error | |||
|
285 | pushop.ret = pushop.remote.unbundle(cg, remoteheads, | |||
|
286 | 'push') | |||
|
287 | else: | |||
|
288 | # we return an integer indicating remote head count | |||
|
289 | # change | |||
|
290 | pushop.ret = pushop.remote.addchangegroup(cg, 'push', pushop.repo.url()) | |||
|
291 | ||||
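For reference, the integer left in pushop.ret follows the addchangegroup contract (a hedged summary; the 0-for-error case exists only on the http unbundle path, as the comments above note):

    # pushop.ret:
    #    0      error (http unbundle only)
    #    1      success, remote head count unchanged
    #    2..n   success, n - 1 new heads on the remote
    #   -2..-n  success, n - 1 heads removed on the remote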
|
292 | def _pushcomputecommonheads(pushop): | |||
|
293 | unfi = pushop.repo.unfiltered() | |||
|
294 | if pushop.ret: | |||
|
295 | # push succeeded, synchronize target of the push | |||
|
296 | cheads = pushop.outgoing.missingheads | |||
|
297 | elif pushop.revs is None: | |||
|
298 | # All-out push failed; synchronize all common | |||
|
299 | cheads = pushop.outgoing.commonheads | |||
|
300 | else: | |||
|
301 | # I want cheads = heads(::missingheads and ::commonheads) | |||
|
302 | # (missingheads is revs with secret changeset filtered out) | |||
|
303 | # | |||
|
304 | # This can be expressed as: | |||
|
305 | # cheads = ( (missingheads and ::commonheads) | |||
|
306 | # + (commonheads and ::missingheads)) | |||
|
307 | # ) | |||
|
308 | # | |||
|
309 | # while trying to push we already computed the following: | |||
|
310 | # common = (::commonheads) | |||
|
311 | # missing = ((commonheads::missingheads) - commonheads) | |||
|
312 | # | |||
|
313 | # We can pick: | |||
|
314 | # * missingheads part of common (::commonheads) | |||
|
315 | common = set(pushop.outgoing.common) | |||
|
316 | nm = pushop.repo.changelog.nodemap | |||
|
317 | cheads = [node for node in pushop.revs if nm[node] in common] | |||
|
318 | # and | |||
|
319 | # * commonheads parents on missing | |||
|
320 | revset = unfi.set('%ln and parents(roots(%ln))', | |||
|
321 | pushop.outgoing.commonheads, | |||
|
322 | pushop.outgoing.missing) | |||
|
323 | cheads.extend(c.node() for c in revset) | |||
|
324 | pushop.commonheads = cheads | |||
|
325 | ||||
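The two-step computation above reuses the common/missing sets already known from discovery. As a cross-check, it is equivalent to evaluating the revset from the comment in one go (an illustration only, not what the code runs):

    cheads = [c.node() for c in unfi.set('heads(::%ln and ::%ln)',
                                         pushop.outgoing.missingheads,
                                         pushop.outgoing.commonheads)]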
|
326 | def _pushsyncphase(pushop): | |||
|
327 | """synchronise phase information locally and remotely""" | |||
|
328 | unfi = pushop.repo.unfiltered() | |||
|
329 | cheads = pushop.commonheads | |||
|
361 | # even when we don't push, exchanging phase data is useful | |||
|
362 | remotephases = pushop.remote.listkeys('phases') | |||
|
363 | if (pushop.ui.configbool('ui', '_usedassubrepo', False) | |||
|
364 | and remotephases # server supports phases | |||
|
365 | and pushop.ret is None # nothing was pushed | |||
|
366 | and remotephases.get('publishing', False)): | |||
|
367 | # When: | |||
|
368 | # - this is a subrepo push | |||
|
369 | # - and remote supports phases | |||
|
370 | # - and no changeset was pushed | |||
|
371 | # - and remote is publishing | |||
|
372 | # We may be in issue 3871 case! | |||
|
373 | # We drop the phase synchronisation that would otherwise | |||
|
374 | # be done as a courtesy; it could publish changesets that | |||
|
375 | # are still draft on the remote. | |||
|
376 | remotephases = {'publishing': 'True'} | |||
|
377 | if not remotephases: # old server or public only reply from non-publishing | |||
|
378 | _localphasemove(pushop, cheads) | |||
|
379 | # don't push any phase data as there is nothing to push | |||
|
380 | else: | |||
|
381 | ana = phases.analyzeremotephases(pushop.repo, cheads, | |||
|
382 | remotephases) | |||
|
383 | pheads, droots = ana | |||
|
384 | ### Apply remote phase on local | |||
|
385 | if remotephases.get('publishing', False): | |||
|
386 | _localphasemove(pushop, cheads) | |||
|
387 | else: # publish = False | |||
|
388 | _localphasemove(pushop, pheads) | |||
|
389 | _localphasemove(pushop, cheads, phases.draft) | |||
|
390 | ### Apply local phase on remote | |||
|
391 | ||||
|
392 | # Get the list of all revs draft on remote but public here. | |||
|
393 | # XXX Beware that the revset breaks if droots is not strictly | |||
|
394 | # XXX a set of roots; we may want to ensure it is, but that is costly | |||
|
395 | outdated = unfi.set('heads((%ln::%ln) and public())', | |||
|
396 | droots, cheads) | |||
|
397 | for newremotehead in outdated: | |||
|
398 | r = pushop.remote.pushkey('phases', | |||
|
399 | newremotehead.hex(), | |||
|
400 | str(phases.draft), | |||
|
401 | str(phases.public)) | |||
|
402 | if not r: | |||
|
403 | pushop.ui.warn(_('updating %s to public failed!\n') | |||
|
404 | % newremotehead) | |||
|
405 | ||||
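For context, the 'phases' pushkey namespace consulted above is a str-to-str mapping; its rough shape, inferred from how analyzeremotephases and the publishing checks consume it (hedged illustration):

    # publishing server: no draft roots are advertised
    remotephases = {'publishing': 'True'}
    # non-publishing server: hex draft roots mapped to str(phases.draft)
    remotephases = {'<40-hex draft root>': '1'}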
|
406 | def _localphasemove(pushop, nodes, phase=phases.public): | |||
|
407 | """move <nodes> to <phase> in the local source repo""" | |||
|
408 | if pushop.locallocked: | |||
|
409 | phases.advanceboundary(pushop.repo, phase, nodes) | |||
|
410 | else: | |||
|
411 | # repo is not locked, do not change any phases! | |||
|
412 | # Informs the user that phases should have been moved when | |||
|
413 | # applicable. | |||
|
414 | actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()] | |||
|
415 | phasestr = phases.phasenames[phase] | |||
|
416 | if actualmoves: | |||
|
417 | pushop.ui.status(_('cannot lock source repo, skipping ' | |||
|
418 | 'local %s phase update\n') % phasestr) | |||
|
419 | ||||
|
420 | def _pushobsolete(pushop): | |||
|
421 | """utility function to push obsolete markers to a remote""" | |||
|
422 | pushop.ui.debug('try to push obsolete markers to remote\n') | |||
|
423 | repo = pushop.repo | |||
|
424 | remote = pushop.remote | |||
|
425 | if (obsolete._enabled and repo.obsstore and | |||
|
426 | 'obsolete' in remote.listkeys('namespaces')): | |||
|
427 | rslts = [] | |||
|
428 | remotedata = repo.listkeys('obsolete') | |||
|
429 | for key in sorted(remotedata, reverse=True): | |||
|
430 | # reverse sort to ensure we end with dump0 | |||
|
431 | data = remotedata[key] | |||
|
432 | rslts.append(remote.pushkey('obsolete', key, '', data)) | |||
|
433 | if [r for r in rslts if not r]: | |||
|
434 | msg = _('failed to push some obsolete markers!\n') | |||
|
435 | repo.ui.warn(msg) | |||
|
436 | ||||
|
437 | def _pushbookmark(pushop): | |||
|
438 | """Update bookmark position on remote""" | |||
|
439 | ui = pushop.ui | |||
|
440 | repo = pushop.repo.unfiltered() | |||
|
441 | remote = pushop.remote | |||
|
442 | ui.debug("checking for updated bookmarks\n") | |||
|
443 | revnums = map(repo.changelog.rev, pushop.revs or []) | |||
|
444 | ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)] | |||
|
445 | (addsrc, adddst, advsrc, advdst, diverge, differ, invalid | |||
|
446 | ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'), | |||
|
447 | srchex=hex) | |||
|
448 | ||||
|
449 | for b, scid, dcid in advsrc: | |||
|
450 | if ancestors and repo[scid].rev() not in ancestors: | |||
|
451 | continue | |||
|
452 | if remote.pushkey('bookmarks', b, dcid, scid): | |||
|
453 | ui.status(_("updating bookmark %s\n") % b) | |||
|
454 | else: | |||
|
455 | ui.warn(_('updating bookmark %s failed!\n') % b) | |||
|
456 | ||||
|
457 | class pulloperation(object): | |||
|
458 | """A object that represent a single pull operation | |||
|
459 | ||||
|
460 | Its purpose is to carry pull-related state and very common operations. | |||
|
461 | ||||
|
462 | A new one should be created at the beginning of each pull and discarded | |||
|
463 | afterward. | |||
|
464 | """ | |||
|
465 | ||||
|
466 | def __init__(self, repo, remote, heads=None, force=False): | |||
|
467 | # repo we pull into | |||
|
468 | self.repo = repo | |||
|
469 | # repo we pull from | |||
|
470 | self.remote = remote | |||
|
471 | # revision we try to pull (None is "all") | |||
|
472 | self.heads = heads | |||
|
473 | # do we force pull? | |||
|
474 | self.force = force | |||
|
475 | # the name the pull transaction | |||
|
476 | self._trname = 'pull\n' + util.hidepassword(remote.url()) | |||
|
477 | # hold the transaction once created | |||
|
478 | self._tr = None | |||
|
479 | # set of common changeset between local and remote before pull | |||
|
480 | self.common = None | |||
|
481 | # set of pulled head | |||
|
482 | self.rheads = None | |||
|
483 | # list of missing changeset to fetch remotely | |||
|
484 | self.fetch = None | |||
|
485 | # result of changegroup pulling (used as return code by pull) | |||
|
486 | self.cgresult = None | |||
|
487 | # list of step remaining todo (related to future bundle2 usage) | |||
|
488 | self.todosteps = set(['changegroup', 'phases', 'obsmarkers']) | |||
|
489 | ||||
|
490 | @util.propertycache | |||
|
491 | def pulledsubset(self): | |||
|
492 | """heads of the set of changeset target by the pull""" | |||
|
493 | # compute target subset | |||
|
494 | if self.heads is None: | |||
|
495 | # We pulled everything possible | |||
|
496 | # sync on everything common | |||
|
497 | c = set(self.common) | |||
|
498 | ret = list(self.common) | |||
|
499 | for n in self.rheads: | |||
|
500 | if n not in c: | |||
|
501 | ret.append(n) | |||
|
502 | return ret | |||
|
503 | else: | |||
|
504 | # We pulled a specific subset | |||
|
505 | # sync on this subset | |||
|
506 | return self.heads | |||
|
507 | ||||
|
508 | def gettransaction(self): | |||
|
509 | """get appropriate pull transaction, creating it if needed""" | |||
|
510 | if self._tr is None: | |||
|
511 | self._tr = self.repo.transaction(self._trname) | |||
|
512 | return self._tr | |||
|
513 | ||||
|
514 | def closetransaction(self): | |||
|
515 | """close transaction if created""" | |||
|
516 | if self._tr is not None: | |||
|
517 | self._tr.close() | |||
|
518 | ||||
|
519 | def releasetransaction(self): | |||
|
520 | """release transaction if created""" | |||
|
521 | if self._tr is not None: | |||
|
522 | self._tr.release() | |||
|
523 | ||||
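The three transaction helpers are meant to bracket a whole pull; condensed from the pull() function below (illustration only):

    pullop = pulloperation(repo, remote)
    try:
        _pulldiscovery(pullop)
        _pullchangeset(pullop)       # steps call pullop.gettransaction()
        pullop.closetransaction()    # only when they actually write, so a
    finally:                         # no-op pull never opens a transaction
        pullop.releasetransaction()  # rolls back unless close() already ran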
|
524 | def pull(repo, remote, heads=None, force=False): | |||
|
525 | pullop = pulloperation(repo, remote, heads, force) | |||
|
526 | if pullop.remote.local(): | |||
|
527 | missing = set(pullop.remote.requirements) - pullop.repo.supported | |||
|
528 | if missing: | |||
|
529 | msg = _("required features are not" | |||
|
530 | " supported in the destination:" | |||
|
531 | " %s") % (', '.join(sorted(missing))) | |||
|
532 | raise util.Abort(msg) | |||
|
533 | ||||
|
534 | lock = pullop.repo.lock() | |||
|
535 | try: | |||
|
536 | _pulldiscovery(pullop) | |||
|
537 | if (pullop.repo.ui.configbool('server', 'bundle2', False) | |||
|
538 | and pullop.remote.capable('bundle2-exp')): | |||
|
539 | _pullbundle2(pullop) | |||
|
540 | if 'changegroup' in pullop.todosteps: | |||
|
541 | _pullchangeset(pullop) | |||
|
542 | if 'phases' in pullop.todosteps: | |||
|
543 | _pullphase(pullop) | |||
|
544 | if 'obsmarkers' in pullop.todosteps: | |||
|
545 | _pullobsolete(pullop) | |||
|
546 | pullop.closetransaction() | |||
|
547 | finally: | |||
|
548 | pullop.releasetransaction() | |||
|
549 | lock.release() | |||
|
550 | ||||
|
551 | return pullop.cgresult | |||
|
552 | ||||
|
553 | def _pulldiscovery(pullop): | |||
|
554 | """discovery phase for the pull | |||
|
555 | ||||
|
556 | Currently handles changeset discovery only; will change to handle all discovery | |||
|
557 | at some point.""" | |||
|
558 | tmp = discovery.findcommonincoming(pullop.repo.unfiltered(), | |||
|
559 | pullop.remote, | |||
|
560 | heads=pullop.heads, | |||
|
561 | force=pullop.force) | |||
|
562 | pullop.common, pullop.fetch, pullop.rheads = tmp | |||
|
563 | ||||
|
564 | def _pullbundle2(pullop): | |||
|
565 | """pull data using bundle2 | |||
|
566 | ||||
|
567 | For now, the only supported data is the changegroup.""" | |||
|
568 | kwargs = {'bundlecaps': set(['HG2X'])} | |||
|
569 | capsblob = bundle2.encodecaps(pullop.repo.bundle2caps) | |||
|
570 | kwargs['bundlecaps'].add('bundle2=' + urllib.quote(capsblob)) | |||
|
571 | # pulling changegroup | |||
|
572 | pullop.todosteps.remove('changegroup') | |||
|
573 | if not pullop.fetch: | |||
|
574 | pullop.repo.ui.status(_("no changes found\n")) | |||
|
575 | pullop.cgresult = 0 | |||
|
576 | else: | |||
|
577 | kwargs['common'] = pullop.common | |||
|
578 | kwargs['heads'] = pullop.heads or pullop.rheads | |||
|
579 | if pullop.heads is None and list(pullop.common) == [nullid]: | |||
|
580 | pullop.repo.ui.status(_("requesting all changes\n")) | |||
|
581 | _pullbundle2extraprepare(pullop, kwargs) | |||
|
582 | if kwargs.keys() == ['format']: | |||
|
583 | return # nothing to pull | |||
|
584 | bundle = pullop.remote.getbundle('pull', **kwargs) | |||
|
585 | try: | |||
|
586 | op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction) | |||
|
587 | except KeyError, exc: | |||
|
588 | raise util.Abort('missing support for %s' % exc) | |||
|
589 | assert len(op.records['changegroup']) == 1 | |||
|
590 | pullop.cgresult = op.records['changegroup'][0]['return'] | |||
|
591 | ||||
|
592 | def _pullbundle2extraprepare(pullop, kwargs): | |||
|
593 | """hook function so that extensions can extend the getbundle call""" | |||
|
594 | pass | |||
|
595 | ||||
|
596 | def _pullchangeset(pullop): | |||
|
597 | """pull changeset from unbundle into the local repo""" | |||
|
598 | # We delay opening the transaction as long as possible so we | |||
|
599 | # don't open one for nothing and don't break a future useful | |||
|
600 | # rollback call | |||
|
601 | pullop.todosteps.remove('changegroup') | |||
|
602 | if not pullop.fetch: | |||
|
603 | pullop.repo.ui.status(_("no changes found\n")) | |||
|
604 | pullop.cgresult = 0 | |||
|
605 | return | |||
|
606 | pullop.gettransaction() | |||
|
607 | if pullop.heads is None and list(pullop.common) == [nullid]: | |||
|
608 | pullop.repo.ui.status(_("requesting all changes\n")) | |||
|
609 | elif pullop.heads is None and pullop.remote.capable('changegroupsubset'): | |||
|
610 | # issue1320, avoid a race if remote changed after discovery | |||
|
611 | pullop.heads = pullop.rheads | |||
|
612 | ||||
|
613 | if pullop.remote.capable('getbundle'): | |||
|
614 | # TODO: get bundlecaps from remote | |||
|
615 | cg = pullop.remote.getbundle('pull', common=pullop.common, | |||
|
616 | heads=pullop.heads or pullop.rheads) | |||
|
617 | elif pullop.heads is None: | |||
|
618 | cg = pullop.remote.changegroup(pullop.fetch, 'pull') | |||
|
619 | elif not pullop.remote.capable('changegroupsubset'): | |||
|
620 | raise util.Abort(_("partial pull cannot be done because " | |||
|
621 | "other repository doesn't support " | |||
|
622 | "changegroupsubset.")) | |||
|
623 | else: | |||
|
624 | cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull') | |||
|
625 | pullop.cgresult = changegroup.addchangegroup(pullop.repo, cg, 'pull', | |||
|
626 | pullop.remote.url()) | |||
|
627 | ||||
|
628 | def _pullphase(pullop): | |||
|
629 | # Get remote phases data from remote | |||
|
630 | pullop.todosteps.remove('phases') | |||
|
631 | remotephases = pullop.remote.listkeys('phases') | |||
|
632 | publishing = bool(remotephases.get('publishing', False)) | |||
|
633 | if remotephases and not publishing: | |||
|
634 | # remote is new and unpublishing | |||
|
635 | pheads, _dr = phases.analyzeremotephases(pullop.repo, | |||
|
636 | pullop.pulledsubset, | |||
|
637 | remotephases) | |||
|
638 | phases.advanceboundary(pullop.repo, phases.public, pheads) | |||
|
639 | phases.advanceboundary(pullop.repo, phases.draft, | |||
|
640 | pullop.pulledsubset) | |||
|
641 | else: | |||
|
642 | # Remote is old or publishing; all common changesets | |||
|
643 | # should be seen as public | |||
|
644 | phases.advanceboundary(pullop.repo, phases.public, | |||
|
645 | pullop.pulledsubset) | |||
|
646 | ||||
|
647 | def _pullobsolete(pullop): | |||
|
648 | """utility function to pull obsolete markers from a remote | |||
|
649 | ||||
|
650 | The `gettransaction` method returns the pull transaction, creating | |||
|
651 | one if necessary. We return the transaction to inform the calling code that | |||
|
652 | a new transaction has been created (when applicable). | |||
|
653 | ||||
|
654 | Exists mostly to allow overriding for experimentation purposes""" | |||
|
655 | pullop.todosteps.remove('obsmarkers') | |||
|
656 | tr = None | |||
|
657 | if obsolete._enabled: | |||
|
658 | pullop.repo.ui.debug('fetching remote obsolete markers\n') | |||
|
659 | remoteobs = pullop.remote.listkeys('obsolete') | |||
|
660 | if 'dump0' in remoteobs: | |||
|
661 | tr = pullop.gettransaction() | |||
|
662 | for key in sorted(remoteobs, reverse=True): | |||
|
663 | if key.startswith('dump'): | |||
|
664 | data = base85.b85decode(remoteobs[key]) | |||
|
665 | pullop.repo.obsstore.mergemarkers(tr, data) | |||
|
666 | pullop.repo.invalidatevolatilesets() | |||
|
667 | return tr | |||
|
668 | ||||
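For reference, the 'obsolete' pushkey namespace iterated above maps keys 'dump0', 'dump1', ... to base85-encoded marker blobs; the reverse sort makes the loop finish on 'dump0', matching the comment in _pushobsolete (a hedged sketch of the shape):

    # remoteobs = {'dump0': '<base85 marker data>',
    #              'dump1': '<base85 marker data>', ...}
    # sorted(remoteobs, reverse=True) -> ['dumpN', ..., 'dump1', 'dump0']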
|
669 | def getbundle(repo, source, heads=None, common=None, bundlecaps=None, | |||
|
670 | **kwargs): | |||
|
671 | """return a full bundle (with potentially multiple kind of parts) | |||
|
672 | ||||
|
673 | Could be a bundle HG10 or a bundle HG2X depending on bundlecaps | |||
|
674 | passed. For now, the bundle can contain only a changegroup, but this will | |||
|
675 | change when more part types become available for bundle2. | |||
|
676 | ||||
|
677 | This is different from changegroup.getbundle that only returns an HG10 | |||
|
678 | changegroup bundle. They may eventually get reunited in the future when we | |||
|
679 | have a clearer idea of the API we want for querying different data. | |||
|
680 | ||||
|
681 | The implementation is at a very early stage and will get massive rework | |||
|
682 | when the API of bundle is refined. | |||
|
683 | """ | |||
|
684 | # build bundle here. | |||
|
685 | cg = changegroup.getbundle(repo, source, heads=heads, | |||
|
686 | common=common, bundlecaps=bundlecaps) | |||
|
687 | if bundlecaps is None or 'HG2X' not in bundlecaps: | |||
|
688 | return cg | |||
|
689 | # very crude first implementation, | |||
|
690 | # the bundle API will change and the generation will be done lazily. | |||
|
691 | b2caps = {} | |||
|
692 | for bcaps in bundlecaps: | |||
|
693 | if bcaps.startswith('bundle2='): | |||
|
694 | blob = urllib.unquote(bcaps[len('bundle2='):]) | |||
|
695 | b2caps.update(bundle2.decodecaps(blob)) | |||
|
696 | bundler = bundle2.bundle20(repo.ui, b2caps) | |||
|
697 | part = bundle2.bundlepart('b2x:changegroup', data=cg.getchunks()) | |||
|
698 | bundler.addpart(part) | |||
|
699 | _getbundleextrapart(bundler, repo, source, heads=None, common=None, | |||
|
700 | bundlecaps=None, **kwargs) | |||
|
701 | return util.chunkbuffer(bundler.getchunks()) | |||
|
702 | ||||
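The HG2X branch above is driven by the caps the client sent; mirroring _pullbundle2 earlier in this file, a bundle2-aware client builds its bundlecaps like this:

    bundlecaps = set(['HG2X'])
    capsblob = bundle2.encodecaps(repo.bundle2caps)
    bundlecaps.add('bundle2=' + urllib.quote(capsblob))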
|
703 | def _getbundleextrapart(bundler, repo, source, heads=None, common=None, | |||
|
704 | bundlecaps=None, **kwargs): | |||
|
705 | """hook function to let extensions add parts to the requested bundle""" | |||
|
706 | pass | |||
|
707 | ||||
|
708 | class PushRaced(RuntimeError): | |||
|
709 | """An exception raised during unbundling that indicate a push race""" | |||
|
710 | ||||
|
711 | def check_heads(repo, their_heads, context): | |||
|
712 | """check if the heads of a repo have been modified | |||
|
713 | ||||
|
714 | Used by peer for unbundling. | |||
|
715 | """ | |||
|
716 | heads = repo.heads() | |||
|
717 | heads_hash = util.sha1(''.join(sorted(heads))).digest() | |||
|
718 | if not (their_heads == ['force'] or their_heads == heads or | |||
|
719 | their_heads == ['hashed', heads_hash]): | |||
|
720 | # someone else committed/pushed/unbundled while we | |||
|
721 | # were transferring data | |||
|
722 | raise PushRaced('repository changed while %s - ' | |||
|
723 | 'please try again' % context) | |||
|
724 | ||||
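The ['hashed', ...] form accepted above lets a client prove which heads it saw without shipping them all. A sketch of the matching client-side computation, assuming the peer advertises support for hashed heads:

    observedheads = remote.heads()  # recorded during discovery
    their_heads = ['hashed',
                   util.sha1(''.join(sorted(observedheads))).digest()]
    remote.unbundle(cg, their_heads, 'push')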
|
725 | def unbundle(repo, cg, heads, source, url): | |||
|
726 | """Apply a bundle to a repo. | |||
|
727 | ||||
|
728 | This function makes sure the repo is locked during the application and has a | |||
|
729 | mechanism to check that no push race occurred between the creation of the | |||
|
730 | bundle and its application. | |||
|
731 | ||||
|
732 | If the push was raced, a PushRaced exception is raised.""" | |||
|
733 | r = 0 | |||
|
734 | # need a transaction when processing a bundle2 stream | |||
|
735 | tr = None | |||
|
736 | lock = repo.lock() | |||
|
737 | try: | |||
|
738 | check_heads(repo, heads, 'uploading changes') | |||
|
739 | # push can proceed | |||
|
740 | if util.safehasattr(cg, 'params'): | |||
|
741 | tr = repo.transaction('unbundle') | |||
|
742 | tr.hookargs['bundle2-exp'] = '1' | |||
|
743 | r = bundle2.processbundle(repo, cg, lambda: tr).reply | |||
|
744 | cl = repo.unfiltered().changelog | |||
|
745 | p = cl.writepending() and repo.root or "" | |||
|
746 | repo.hook('b2x-pretransactionclose', throw=True, source=source, | |||
|
747 | url=url, pending=p, **tr.hookargs) | |||
|
748 | tr.close() | |||
|
749 | repo.hook('b2x-transactionclose', source=source, url=url, | |||
|
750 | **tr.hookargs) | |||
|
751 | else: | |||
|
752 | r = changegroup.addchangegroup(repo, cg, source, url) | |||
|
753 | finally: | |||
|
754 | if tr is not None: | |||
|
755 | tr.release() | |||
|
756 | lock.release() | |||
|
757 | return r |
@@ -71,7 +71,7 b' install-doc: doc' | |||||
71 | install-home: install-home-bin install-home-doc |
|
71 | install-home: install-home-bin install-home-doc | |
72 |
|
72 | |||
73 | install-home-bin: build |
|
73 | install-home-bin: build | |
74 | $(PYTHON) setup.py $(PURE) install --home="$(HOME)" --force |
|
74 | $(PYTHON) setup.py $(PURE) install --home="$(HOME)" --prefix="" --force | |
75 |
|
75 | |||
76 | install-home-doc: doc |
|
76 | install-home-doc: doc | |
77 | cd doc && $(MAKE) $(MFLAGS) PREFIX="$(HOME)" install |
|
77 | cd doc && $(MAKE) $(MFLAGS) PREFIX="$(HOME)" install | |
@@ -102,7 +102,7 b' check-code:' | |||||
102 |
|
102 | |||
103 | update-pot: i18n/hg.pot |
|
103 | update-pot: i18n/hg.pot | |
104 |
|
104 | |||
105 | i18n/hg.pot: $(PYFILES) $(DOCFILES) |
|
105 | i18n/hg.pot: $(PYFILES) $(DOCFILES) i18n/posplit i18n/hggettext | |
106 | $(PYTHON) i18n/hggettext mercurial/commands.py \ |
|
106 | $(PYTHON) i18n/hggettext mercurial/commands.py \ | |
107 | hgext/*.py hgext/*/__init__.py \ |
|
107 | hgext/*.py hgext/*/__init__.py \ | |
108 | mercurial/fileset.py mercurial/revset.py \ |
|
108 | mercurial/fileset.py mercurial/revset.py \ |
@@ -121,6 +121,7 b' testpats = [' | |||||
121 | (r'^( *)\t', "don't use tabs to indent"), |
|
121 | (r'^( *)\t', "don't use tabs to indent"), | |
122 | (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)', |
|
122 | (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)', | |
123 | "put a backslash-escaped newline after sed 'i' command"), |
|
123 | "put a backslash-escaped newline after sed 'i' command"), | |
|
124 | (r'^diff *-\w*u.*$\n(^ \$ |^$)', "prefix diff -u with cmp"), | |||
124 | ], |
|
125 | ], | |
125 | # warnings |
|
126 | # warnings | |
126 | [ |
|
127 | [ | |
@@ -150,6 +151,9 b' utestpats = [' | |||||
150 | "explicit exit code checks unnecessary"), |
|
151 | "explicit exit code checks unnecessary"), | |
151 | (uprefix + r'set -e', "don't use set -e"), |
|
152 | (uprefix + r'set -e', "don't use set -e"), | |
152 | (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"), |
|
153 | (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"), | |
|
154 | (uprefix + r'.*:\.\S*/', "x:.y in a path does not work on msys, rewrite " | |||
|
155 | "as x://.y, or see `hg log -k msys` for alternatives", r'-\S+:\.|' #-Rxxx | |||
|
156 | 'hg pull -q file:../test'), # in test-pull.t which is skipped on windows | |||
153 | (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg), |
|
157 | (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg), | |
154 | (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$', |
|
158 | (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$', | |
155 | winglobmsg), |
|
159 | winglobmsg), | |
@@ -162,6 +166,8 b' utestpats = [' | |||||
162 | (r'^ moving \S+/.*[^)]$', winglobmsg), |
|
166 | (r'^ moving \S+/.*[^)]$', winglobmsg), | |
163 | (r'^ no changes made to subrepo since.*/.*[^)]$', winglobmsg), |
|
167 | (r'^ no changes made to subrepo since.*/.*[^)]$', winglobmsg), | |
164 | (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg), |
|
168 | (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg), | |
|
169 | (r'^ .*file://\$TESTTMP', | |||
|
170 | 'write "file:/*/$TESTTMP" + (glob) to match on windows too'), | |||
165 | ], |
|
171 | ], | |
166 | # warnings |
|
172 | # warnings | |
167 | [ |
|
173 | [ | |
@@ -185,6 +191,7 b' utestfilters = [' | |||||
185 |
|
191 | |||
186 | pypats = [ |
|
192 | pypats = [ | |
187 | [ |
|
193 | [ | |
|
194 | (r'\([^)]*\*\w[^()]+\w+=', "can't pass varargs with keyword in Py2.5"), | |||
188 | (r'^\s*def\s*\w+\s*\(.*,\s*\(', |
|
195 | (r'^\s*def\s*\w+\s*\(.*,\s*\(', | |
189 | "tuple parameter unpacking not available in Python 3+"), |
|
196 | "tuple parameter unpacking not available in Python 3+"), | |
190 | (r'lambda\s*\(.*,.*\)', |
|
197 | (r'lambda\s*\(.*,.*\)', | |
@@ -194,12 +201,14 b' pypats = [' | |||||
194 | 'use "import foo.bar" on its own line instead.'), |
|
201 | 'use "import foo.bar" on its own line instead.'), | |
195 | (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"), |
|
202 | (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"), | |
196 | (r'\breduce\s*\(.*', "reduce is not available in Python 3+"), |
|
203 | (r'\breduce\s*\(.*', "reduce is not available in Python 3+"), | |
|
204 | (r'dict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}', | |||
|
205 | 'dict-from-generator'), | |||
197 | (r'\.has_key\b', "dict.has_key is not available in Python 3+"), |
|
206 | (r'\.has_key\b', "dict.has_key is not available in Python 3+"), | |
198 | (r'\s<>\s', '<> operator is not available in Python 3+, use !='), |
|
207 | (r'\s<>\s', '<> operator is not available in Python 3+, use !='), | |
199 | (r'^\s*\t', "don't use tabs"), |
|
208 | (r'^\s*\t', "don't use tabs"), | |
200 | (r'\S;\s*\n', "semicolon"), |
|
209 | (r'\S;\s*\n', "semicolon"), | |
201 | (r'[^_]_\((?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"), |
|
210 | (r'[^_]_\([ \t\n]*(?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"), | |
202 | (r"[^_]_\((?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"), |
|
211 | (r"[^_]_\([ \t\n]*(?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"), | |
203 | (r'(\w|\)),\w', "missing whitespace after ,"), |
|
212 | (r'(\w|\)),\w', "missing whitespace after ,"), | |
204 | (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"), |
|
213 | (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"), | |
205 | (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"), |
|
214 | (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"), | |
@@ -306,6 +315,7 b' txtfilters = []' | |||||
306 | txtpats = [ |
|
315 | txtpats = [ | |
307 | [ |
|
316 | [ | |
308 | ('\s$', 'trailing whitespace'), |
|
317 | ('\s$', 'trailing whitespace'), | |
|
318 | ('.. note::[ \n][^\n]', 'add two newlines after note::') | |||
309 | ], |
|
319 | ], | |
310 | [] |
|
320 | [] | |
311 | ] |
|
321 | ] |
@@ -1,4 +1,4 b'' | |||||
1 | #!/bin/bash |
|
1 | #!/usr/bin/env bash | |
2 | # A simple script for opening merge conflicts in the editor. |
|
2 | # A simple script for opening merge conflicts in the editor. | |
3 | # Use the following Mercurial settings to enable it. |
|
3 | # Use the following Mercurial settings to enable it. | |
4 | # |
|
4 | # |
@@ -33,10 +33,11 b' class FixBytesmod(fixer_base.BaseFix):' | |||||
33 | ''' |
|
33 | ''' | |
34 |
|
34 | |||
35 | def transform(self, node, results): |
|
35 | def transform(self, node, results): | |
36 |
|
|
36 | for bfn in blacklist: | |
37 | return |
|
37 | if self.filename.endswith(bfn): | |
38 | elif self.filename == 'mercurial/util.py': |
|
38 | return | |
39 | touch_import('.', 'py3kcompat', node=node) |
|
39 | if not self.filename.endswith('mercurial/py3kcompat.py'): | |
|
40 | touch_import('mercurial', 'py3kcompat', node=node) | |||
40 |
|
41 | |||
41 | formatstr = results['formatstr'].clone() |
|
42 | formatstr = results['formatstr'].clone() | |
42 | data = results['data'].clone() |
|
43 | data = results['data'].clone() | |
@@ -60,4 +61,3 b' class FixBytesmod(fixer_base.BaseFix):' | |||||
60 |
|
61 | |||
61 | call = Call(Name('bytesformatter', prefix=' '), args) |
|
62 | call = Call(Name('bytesformatter', prefix=' '), args) | |
62 | return call |
|
63 | return call | |
63 |
|
@@ -208,7 +208,7 b' proc getcommits {rargs} {' | |||||
208 | exit 1 |
|
208 | exit 1 | |
209 | } |
|
209 | } | |
210 | set leftover {} |
|
210 | set leftover {} | |
211 | fconfigure $commfd -blocking 0 -translation lf |
|
211 | fconfigure $commfd -blocking 0 -translation lf -eofchar {} | |
212 | fileevent $commfd readable [list getcommitlines $commfd] |
|
212 | fileevent $commfd readable [list getcommitlines $commfd] | |
213 | $canv delete all |
|
213 | $canv delete all | |
214 | $canv create text 3 3 -anchor nw -text "Reading commits..." \ |
|
214 | $canv create text 3 3 -anchor nw -text "Reading commits..." \ | |
@@ -795,8 +795,8 b' proc bindkey {ev script} {' | |||||
795 | # set the focus back to the toplevel for any click outside |
|
795 | # set the focus back to the toplevel for any click outside | |
796 | # the entry widgets |
|
796 | # the entry widgets | |
797 | proc click {w} { |
|
797 | proc click {w} { | |
798 | global entries |
|
798 | global ctext entries | |
799 | foreach e $entries { |
|
799 | foreach e [concat $entries $ctext] { | |
800 | if {$w == $e} return |
|
800 | if {$w == $e} return | |
801 | } |
|
801 | } | |
802 | focus . |
|
802 | focus . | |
@@ -2546,6 +2546,7 b' proc selectline {l isnew} {' | |||||
2546 |
|
2546 | |||
2547 | proc selnextline {dir} { |
|
2547 | proc selnextline {dir} { | |
2548 | global selectedline |
|
2548 | global selectedline | |
|
2549 | focus . | |||
2549 | if {![info exists selectedline]} return |
|
2550 | if {![info exists selectedline]} return | |
2550 | set l [expr $selectedline + $dir] |
|
2551 | set l [expr $selectedline + $dir] | |
2551 | unmarkmatches |
|
2552 | unmarkmatches | |
@@ -2583,6 +2584,7 b' proc addtohistory {cmd} {' | |||||
2583 |
|
2584 | |||
2584 | proc goback {} { |
|
2585 | proc goback {} { | |
2585 | global history historyindex |
|
2586 | global history historyindex | |
|
2587 | focus . | |||
2586 |
|
2588 | |||
2587 | if {$historyindex > 1} { |
|
2589 | if {$historyindex > 1} { | |
2588 | incr historyindex -1 |
|
2590 | incr historyindex -1 | |
@@ -2597,6 +2599,7 b' proc goback {} {' | |||||
2597 |
|
2599 | |||
2598 | proc goforw {} { |
|
2600 | proc goforw {} { | |
2599 | global history historyindex |
|
2601 | global history historyindex | |
|
2602 | focus . | |||
2600 |
|
2603 | |||
2601 | if {$historyindex < [llength $history]} { |
|
2604 | if {$historyindex < [llength $history]} { | |
2602 | set cmd [lindex $history $historyindex] |
|
2605 | set cmd [lindex $history $historyindex] | |
@@ -3890,7 +3893,7 b' proc mktaggo {} {' | |||||
3890 | } |
|
3893 | } | |
3891 |
|
3894 | |||
3892 | proc writecommit {} { |
|
3895 | proc writecommit {} { | |
3893 | global rowmenuid wrcomtop commitinfo |
|
3896 | global rowmenuid wrcomtop commitinfo | |
3894 |
|
3897 | |||
3895 | set top .writecommit |
|
3898 | set top .writecommit | |
3896 | set wrcomtop $top |
|
3899 | set wrcomtop $top | |
@@ -3905,12 +3908,9 b' proc writecommit {} {' | |||||
3905 | $top.head insert 0 [lindex $commitinfo($rowmenuid) 0] |
|
3908 | $top.head insert 0 [lindex $commitinfo($rowmenuid) 0] | |
3906 | $top.head conf -state readonly |
|
3909 | $top.head conf -state readonly | |
3907 | grid x $top.head -sticky w |
|
3910 | grid x $top.head -sticky w | |
3908 | ttk::label $top.clab -text "Command:" |
|
|||
3909 | ttk::entry $top.cmd -width 60 -textvariable wrcomcmd |
|
|||
3910 | grid $top.clab $top.cmd -sticky w -pady 10 |
|
|||
3911 | ttk::label $top.flab -text "Output file:" |
|
3911 | ttk::label $top.flab -text "Output file:" | |
3912 | ttk::entry $top.fname -width 60 |
|
3912 | ttk::entry $top.fname -width 60 | |
3913 | $top.fname insert 0 [file normalize "commit-[string range $rowmenuid 0 6]"] |
|
3913 | $top.fname insert 0 [file normalize "commit-[string range $rowmenuid 0 6].diff"] | |
3914 | grid $top.flab $top.fname -sticky w |
|
3914 | grid $top.flab $top.fname -sticky w | |
3915 | ttk::frame $top.buts |
|
3915 | ttk::frame $top.buts | |
3916 | ttk::button $top.buts.gen -text "Write" -command wrcomgo |
|
3916 | ttk::button $top.buts.gen -text "Write" -command wrcomgo | |
@@ -3928,9 +3928,8 b' proc wrcomgo {} {' | |||||
3928 | global wrcomtop |
|
3928 | global wrcomtop | |
3929 |
|
3929 | |||
3930 | set id [$wrcomtop.sha1 get] |
|
3930 | set id [$wrcomtop.sha1 get] | |
3931 | set cmd "echo $id | [$wrcomtop.cmd get]" |
|
|||
3932 | set fname [$wrcomtop.fname get] |
|
3931 | set fname [$wrcomtop.fname get] | |
3933 | if {[catch {exec sh -c $cmd > $fname &} err]} { |
|
3932 | if {[catch {exec $::env(HG) --config ui.report_untrusted=false export --git -o [string map {% %%} $fname] $id} err]} { | |
3934 | error_popup "Error writing commit: $err" |
|
3933 | error_popup "Error writing commit: $err" | |
3935 | } |
|
3934 | } | |
3936 | catch {destroy $wrcomtop} |
|
3935 | catch {destroy $wrcomtop} | |
@@ -4056,7 +4055,6 b' proc getconfig {} {' | |||||
4056 | set datemode 0 |
|
4055 | set datemode 0 | |
4057 | set boldnames 0 |
|
4056 | set boldnames 0 | |
4058 | set diffopts "-U 5 -p" |
|
4057 | set diffopts "-U 5 -p" | |
4059 | set wrcomcmd "\"\$HG\" --config ui.report_untrusted=false debug-diff-tree --stdin -p --pretty" |
|
|||
4060 |
|
4058 | |||
4061 | set mainfont {Helvetica 9} |
|
4059 | set mainfont {Helvetica 9} | |
4062 | set curidfont {} |
|
4060 | set curidfont {} |
@@ -11,12 +11,15 b' import zlib' | |||||
11 | def dotted_name_of_path(path): |
|
11 | def dotted_name_of_path(path): | |
12 | """Given a relative path to a source file, return its dotted module name. |
|
12 | """Given a relative path to a source file, return its dotted module name. | |
13 |
|
13 | |||
14 |
|
||||
15 | >>> dotted_name_of_path('mercurial/error.py') |
|
14 | >>> dotted_name_of_path('mercurial/error.py') | |
16 | 'mercurial.error' |
|
15 | 'mercurial.error' | |
|
16 | >>> dotted_name_of_path('zlibmodule.so') | |||
|
17 | 'zlib' | |||
17 | """ |
|
18 | """ | |
18 | parts = path.split('/') |
|
19 | parts = path.split('/') | |
19 | parts[-1] = parts[-1][
|
20 | parts[-1] = parts[-1].split('.', 1)[0] # remove .py and .so and .ARCH.so | |
|
21 | if parts[-1].endswith('module'): | |||
|
22 | parts[-1] = parts[-1][:-6] | |||
20 | return '.'.join(parts) |
|
23 | return '.'.join(parts) | |
21 |
|
24 | |||
22 |
|
25 | |||
@@ -136,7 +139,7 b' def verify_stdlib_on_own_line(source):' | |||||
136 | http://bugs.python.org/issue19510. |
|
139 | http://bugs.python.org/issue19510. | |
137 |
|
140 | |||
138 | >>> list(verify_stdlib_on_own_line('import sys, foo')) |
|
141 | >>> list(verify_stdlib_on_own_line('import sys, foo')) | |
139 | ['mixed stdlib
|
142 | ['mixed imports\\n stdlib: sys\\n relative: foo'] | |
140 | >>> list(verify_stdlib_on_own_line('import sys, os')) |
|
143 | >>> list(verify_stdlib_on_own_line('import sys, os')) | |
141 | [] |
|
144 | [] | |
142 | >>> list(verify_stdlib_on_own_line('import foo, bar')) |
|
145 | >>> list(verify_stdlib_on_own_line('import foo, bar')) | |
@@ -144,13 +147,13 b' def verify_stdlib_on_own_line(source):' | |||||
144 | """ |
|
147 | """ | |
145 | for node in ast.walk(ast.parse(source)): |
|
148 | for node in ast.walk(ast.parse(source)): | |
146 | if isinstance(node, ast.Import): |
|
149 | if isinstance(node, ast.Import): | |
147 | from_stdlib = {} |
|
150 | from_stdlib = {False: [], True: []} | |
148 | for n in node.names: |
|
151 | for n in node.names: | |
149 | from_stdlib[n.name
|
152 | from_stdlib[n.name in stdlib_modules].append(n.name) | |
150 | num_std = len([x for x in from_stdlib.values() if x]) |
|
153 | if from_stdlib[True] and from_stdlib[False]: | |
151 | if num_std not in (len(from_stdlib.values()), 0): |
|
154 | yield ('mixed imports\n stdlib: %s\n relative: %s' % | |
152 | yield ('mixed stdlib and relative imports:\n %s' % |
|
155 | (', '.join(sorted(from_stdlib[True])), | |
153 |
', '.join(sorted(from_stdlib |
|
156 | ', '.join(sorted(from_stdlib[False])))) | |
154 |
|
157 | |||
155 | class CircularImport(Exception): |
|
158 | class CircularImport(Exception): | |
156 | pass |
|
159 | pass |
@@ -85,7 +85,6 b' beyondcompare3.diffargs=/lro /lefttitle=' | |||||
85 |
|
85 | |||
86 | ; Linux version of Beyond Compare |
|
86 | ; Linux version of Beyond Compare | |
87 | bcompare.args=$local $other $base -mergeoutput=$output -ro -lefttitle=parent1 -centertitle=base -righttitle=parent2 -outputtitle=merged -automerge -reviewconflicts -solo |
|
87 | bcompare.args=$local $other $base -mergeoutput=$output -ro -lefttitle=parent1 -centertitle=base -righttitle=parent2 -outputtitle=merged -automerge -reviewconflicts -solo | |
88 | bcompare.premerge=False |
|
|||
89 | bcompare.gui=True |
|
88 | bcompare.gui=True | |
90 | bcompare.priority=-1 |
|
89 | bcompare.priority=-1 | |
91 | bcompare.diffargs=-lro -lefttitle='$plabel1' -righttitle='$clabel' -solo -expandall $parent $child |
|
90 | bcompare.diffargs=-lro -lefttitle='$plabel1' -righttitle='$clabel' -solo -expandall $parent $child | |
@@ -103,7 +102,6 b' araxis.regkey=SOFTWARE\\Classes\\TypeLib\\{' | |||||
103 | araxis.regappend=\ConsoleCompare.exe |
|
102 | araxis.regappend=\ConsoleCompare.exe | |
104 | araxis.priority=-2 |
|
103 | araxis.priority=-2 | |
105 | araxis.args=/3 /a2 /wait /merge /title1:"Other" /title2:"Base" /title3:"Local :"$local $other $base $local $output |
|
104 | araxis.args=/3 /a2 /wait /merge /title1:"Other" /title2:"Base" /title3:"Local :"$local $other $base $local $output | |
106 | araxis.premerge=False |
|
|||
107 | araxis.checkconflict=True |
|
105 | araxis.checkconflict=True | |
108 | araxis.binary=True |
|
106 | araxis.binary=True | |
109 | araxis.gui=True |
|
107 | araxis.gui=True |
@@ -335,7 +335,7 b' def perfrevset(ui, repo, expr, clear=Fal' | |||||
335 | def d(): |
|
335 | def d(): | |
336 | if clear: |
|
336 | if clear: | |
337 | repo.invalidatevolatilesets() |
|
337 | repo.invalidatevolatilesets() | |
338 | repo.revs(expr) |
|
338 | for r in repo.revs(expr): pass | |
339 | timer(d) |
|
339 | timer(d) | |
340 |
|
340 | |||
341 | @command('perfvolatilesets') |
|
341 | @command('perfvolatilesets') |
@@ -152,7 +152,7 b' def analyze(ui, repo, *revs, **opts):' | |||||
152 | if lastctx.rev() != nullrev: |
|
152 | if lastctx.rev() != nullrev: | |
153 | interarrival[roundto(ctx.date()[0] - lastctx.date()[0], 300)] += 1 |
|
153 | interarrival[roundto(ctx.date()[0] - lastctx.date()[0], 300)] += 1 | |
154 | diff = sum((d.splitlines() |
|
154 | diff = sum((d.splitlines() | |
155 | for d in ctx.diff(pctx, opts=
|
155 | for d in ctx.diff(pctx, opts={'git': True})), []) | |
156 | fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0 |
|
156 | fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0 | |
157 | for filename, mar, lineadd, lineremove, binary in parsegitdiff(diff): |
|
157 | for filename, mar, lineadd, lineremove, binary in parsegitdiff(diff): | |
158 | if binary: |
|
158 | if binary: | |
@@ -189,21 +189,21 b' def analyze(ui, repo, *revs, **opts):' | |||||
189 | def pronk(d): |
|
189 | def pronk(d): | |
190 | return sorted(d.iteritems(), key=lambda x: x[1], reverse=True) |
|
190 | return sorted(d.iteritems(), key=lambda x: x[1], reverse=True) | |
191 |
|
191 | |||
192 | json.dump(
|
192 | json.dump({'revs': len(revs), | |
193 |
|
|
193 | 'lineschanged': pronk(lineschanged), | |
194 |
|
|
194 | 'children': pronk(invchildren), | |
195 |
|
|
195 | 'fileschanged': pronk(fileschanged), | |
196 |
|
|
196 | 'filesadded': pronk(filesadded), | |
197 |
|
|
197 | 'linesinfilesadded': pronk(linesinfilesadded), | |
198 |
|
|
198 | 'dirsadded': pronk(dirsadded), | |
199 |
|
|
199 | 'filesremoved': pronk(filesremoved), | |
200 |
|
|
200 | 'linelengths': pronk(linelengths), | |
201 |
|
|
201 | 'parents': pronk(parents), | |
202 |
|
|
202 | 'p1distance': pronk(p1distance), | |
203 |
|
|
203 | 'p2distance': pronk(p2distance), | |
204 |
|
|
204 | 'interarrival': pronk(interarrival), | |
205 |
|
|
205 | 'tzoffset': pronk(tzoffset), | |
206 |
|
|
206 | }, | |
207 | fp) |
|
207 | fp) | |
208 | fp.close() |
|
208 | fp.close() | |
209 |
|
209 |
@@ -50,6 +50,9 b' def get_opts(opts):' | |||||
50 | allopts[-1] += " <%s[+]>" % optlabel |
|
50 | allopts[-1] += " <%s[+]>" % optlabel | |
51 | elif (default is not None) and not isinstance(default, bool): |
|
51 | elif (default is not None) and not isinstance(default, bool): | |
52 | allopts[-1] += " <%s>" % optlabel |
|
52 | allopts[-1] += " <%s>" % optlabel | |
|
53 | if '\n' in desc: | |||
|
54 | # only remove line breaks and indentation | |||
|
55 | desc = ' '.join(l.lstrip() for l in desc.split('\n')) | |||
53 | desc += default and _(" (default: %s)") % default or "" |
|
56 | desc += default and _(" (default: %s)") % default or "" | |
54 | yield (", ".join(allopts), desc) |
|
57 | yield (", ".join(allopts), desc) | |
55 |
|
58 | |||
@@ -153,6 +156,8 b' def commandprinter(ui, cmdtable, section' | |||||
153 | continue |
|
156 | continue | |
154 | d = get_cmd(h[f], cmdtable) |
|
157 | d = get_cmd(h[f], cmdtable) | |
155 | ui.write(sectionfunc(d['cmd'])) |
|
158 | ui.write(sectionfunc(d['cmd'])) | |
|
159 | # short description | |||
|
160 | ui.write(d['desc'][0]) | |||
156 | # synopsis |
|
161 | # synopsis | |
157 | ui.write("::\n\n") |
|
162 | ui.write("::\n\n") | |
158 | synopsislines = d['synopsis'].splitlines() |
|
163 | synopsislines = d['synopsis'].splitlines() |
@@ -620,7 +620,7 b' class bzxmlrpc(bzaccess):' | |||||
620 | ver = self.bzproxy.Bugzilla.version()['version'].split('.') |
|
620 | ver = self.bzproxy.Bugzilla.version()['version'].split('.') | |
621 | self.bzvermajor = int(ver[0]) |
|
621 | self.bzvermajor = int(ver[0]) | |
622 | self.bzverminor = int(ver[1]) |
|
622 | self.bzverminor = int(ver[1]) | |
623 | self.bzproxy.User.login(
|
623 | self.bzproxy.User.login({'login': user, 'password': passwd}) | |
624 |
|
624 | |||
625 | def transport(self, uri): |
|
625 | def transport(self, uri): | |
626 | if urlparse.urlparse(uri, "http")[0] == "https": |
|
626 | if urlparse.urlparse(uri, "http")[0] == "https": | |
@@ -630,13 +630,15 b' class bzxmlrpc(bzaccess):' | |||||
630 |
|
630 | |||
631 | def get_bug_comments(self, id): |
|
631 | def get_bug_comments(self, id): | |
632 | """Return a string with all comment text for a bug.""" |
|
632 | """Return a string with all comment text for a bug.""" | |
633 | c = self.bzproxy.Bug.comments(
|
633 | c = self.bzproxy.Bug.comments({'ids': [id], | |
|
634 | 'include_fields': ['text']}) | |||
634 | return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']]) |
|
635 | return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']]) | |
635 |
|
636 | |||
636 | def filter_real_bug_ids(self, bugs): |
|
637 | def filter_real_bug_ids(self, bugs): | |
637 | probe = self.bzproxy.Bug.get(
|
638 | probe = self.bzproxy.Bug.get({'ids': sorted(bugs.keys()), | |
638 |
|
|
639 | 'include_fields': [], | |
639 |
|
|
640 | 'permissive': True, | |
|
641 | }) | |||
640 | for badbug in probe['faults']: |
|
642 | for badbug in probe['faults']: | |
641 | id = badbug['id'] |
|
643 | id = badbug['id'] | |
642 | self.ui.status(_('bug %d does not exist\n') % id) |
|
644 | self.ui.status(_('bug %d does not exist\n') % id) | |
@@ -717,10 +719,10 b' class bzxmlrpcemail(bzxmlrpc):' | |||||
717 | than the subject line, and leave a blank line after it. |
|
719 | than the subject line, and leave a blank line after it. | |
718 | ''' |
|
720 | ''' | |
719 | user = self.map_committer(committer) |
|
721 | user = self.map_committer(committer) | |
720 | matches = self.bzproxy.User.get(
|
722 | matches = self.bzproxy.User.get({'match': [user]}) | |
721 | if not matches['users']: |
|
723 | if not matches['users']: | |
722 | user = self.ui.config('bugzilla', 'user', 'bugs') |
|
724 | user = self.ui.config('bugzilla', 'user', 'bugs') | |
723 | matches = self.bzproxy.User.get(
|
725 | matches = self.bzproxy.User.get({'match': [user]}) | |
724 | if not matches['users']: |
|
726 | if not matches['users']: | |
725 | raise util.Abort(_("default bugzilla user %s email not found") % |
|
727 | raise util.Abort(_("default bugzilla user %s email not found") % | |
726 | user) |
|
728 | user) | |
@@ -879,14 +881,13 b' class bugzilla(object):' | |||||
879 |
|
881 | |||
880 | mapfile = self.ui.config('bugzilla', 'style') |
|
882 | mapfile = self.ui.config('bugzilla', 'style') | |
881 | tmpl = self.ui.config('bugzilla', 'template') |
|
883 | tmpl = self.ui.config('bugzilla', 'template') | |
882 | t = cmdutil.changeset_templater(self.ui, self.repo, |
|
|||
883 | False, None, mapfile, False) |
|
|||
884 | if not mapfile and not tmpl: |
|
884 | if not mapfile and not tmpl: | |
885 | tmpl = _('changeset {node|short} in repo {root} refers ' |
|
885 | tmpl = _('changeset {node|short} in repo {root} refers ' | |
886 | 'to bug {bug}.\ndetails:\n\t{desc|tabindent}') |
|
886 | 'to bug {bug}.\ndetails:\n\t{desc|tabindent}') | |
887 | if tmpl: |
|
887 | if tmpl: | |
888 | tmpl = templater.parsestring(tmpl, quoted=False) |
|
888 | tmpl = templater.parsestring(tmpl, quoted=False) | |
889 | t.use_template(tmpl) |
|
889 | t = cmdutil.changeset_templater(self.ui, self.repo, | |
|
890 | False, None, tmpl, mapfile, False) | |||
890 | self.ui.pushbuffer() |
|
891 | self.ui.pushbuffer() | |
891 | t.show(ctx, changes=ctx.changeset(), |
|
892 | t.show(ctx, changes=ctx.changeset(), | |
892 | bug=str(bugid), |
|
893 | bug=str(bugid), |
@@ -18,10 +18,10 b" testedwith = 'internal'" | |||||
18 | def maketemplater(ui, repo, tmpl): |
|
18 | def maketemplater(ui, repo, tmpl): | |
19 | tmpl = templater.parsestring(tmpl, quoted=False) |
|
19 | tmpl = templater.parsestring(tmpl, quoted=False) | |
20 | try: |
|
20 | try: | |
21 | t = cmdutil.changeset_templater(ui, repo, False, None,
|
21 | t = cmdutil.changeset_templater(ui, repo, False, None, tmpl, | |
|
22 | None, False) | |||
22 | except SyntaxError, inst: |
|
23 | except SyntaxError, inst: | |
23 | raise util.Abort(inst.args[0]) |
|
24 | raise util.Abort(inst.args[0]) | |
24 | t.use_template(tmpl) |
|
|||
25 | return t |
|
25 | return t | |
26 |
|
26 | |||
27 | def changedlines(ui, repo, ctx1, ctx2, fns): |
|
27 | def changedlines(ui, repo, ctx1, ctx2, fns): |
@@ -311,6 +311,15 b' def extstyles():' | |||||
311 | for name, ext in extensions.extensions(): |
|
311 | for name, ext in extensions.extensions(): | |
312 | _styles.update(getattr(ext, 'colortable', {})) |
|
312 | _styles.update(getattr(ext, 'colortable', {})) | |
313 |
|
313 | |||
|
314 | def valideffect(effect): | |||
|
315 | 'Determine if the effect is valid or not.' | |||
|
316 | good = False | |||
|
317 | if not _terminfo_params and effect in _effects: | |||
|
318 | good = True | |||
|
319 | elif effect in _terminfo_params or effect[:-11] in _terminfo_params: | |||
|
320 | good = True | |||
|
321 | return good | |||
|
322 | ||||
314 | def configstyles(ui): |
|
323 | def configstyles(ui): | |
315 | for status, cfgeffects in ui.configitems('color'): |
|
324 | for status, cfgeffects in ui.configitems('color'): | |
316 | if '.' not in status or status.startswith('color.'): |
|
325 | if '.' not in status or status.startswith('color.'): | |
@@ -319,9 +328,7 b' def configstyles(ui):' | |||||
319 | if cfgeffects: |
|
328 | if cfgeffects: | |
320 | good = [] |
|
329 | good = [] | |
321 | for e in cfgeffects: |
|
330 | for e in cfgeffects: | |
322 | if
|
331 | if valideffect(e): | |
323 | good.append(e) |
|
|||
324 | elif e in _terminfo_params or e[:-11] in _terminfo_params: |
|
|||
325 | good.append(e) |
|
332 | good.append(e) | |
326 | else: |
|
333 | else: | |
327 | ui.warn(_("ignoring unknown color/effect %r " |
|
334 | ui.warn(_("ignoring unknown color/effect %r " | |
@@ -375,6 +382,8 b' class colorui(uimod.ui):' | |||||
375 | s = _styles.get(l, '') |
|
382 | s = _styles.get(l, '') | |
376 | if s: |
|
383 | if s: | |
377 | effects.append(s) |
|
384 | effects.append(s) | |
|
385 | elif valideffect(l): | |||
|
386 | effects.append(l) | |||
378 | effects = ' '.join(effects) |
|
387 | effects = ' '.join(effects) | |
379 | if effects: |
|
388 | if effects: | |
380 | return '\n'.join([render_effects(s, effects) |
|
389 | return '\n'.join([render_effects(s, effects) | |
@@ -386,6 +395,10 b' def templatelabel(context, mapping, args' | |||||
386 | # i18n: "label" is a keyword |
|
395 | # i18n: "label" is a keyword | |
387 | raise error.ParseError(_("label expects two arguments")) |
|
396 | raise error.ParseError(_("label expects two arguments")) | |
388 |
|
397 | |||
|
398 | # add known effects to the mapping so symbols like 'red', 'bold', | |||
|
399 | # etc. don't need to be quoted | |||
|
400 | mapping.update(dict([(k, k) for k in _effects])) | |||
|
401 | ||||
389 | thing = templater._evalifliteral(args[1], context, mapping) |
|
402 | thing = templater._evalifliteral(args[1], context, mapping) | |
390 |
|
403 | |||
391 | # apparently, repo could be a string that is the favicon? |
|
404 | # apparently, repo could be a string that is the favicon? | |
@@ -424,6 +437,16 b' def extsetup(ui):' | |||||
424 | _("when to colorize (boolean, always, auto, or never)"), |
|
437 | _("when to colorize (boolean, always, auto, or never)"), | |
425 | _('TYPE'))) |
|
438 | _('TYPE'))) | |
426 |
|
439 | |||
|
440 | def debugcolor(ui, repo, **opts): | |||
|
441 | global _styles | |||
|
442 | _styles = {} | |||
|
443 | for effect in _effects.keys(): | |||
|
444 | _styles[effect] = effect | |||
|
445 | ui.write(('color mode: %s\n') % ui._colormode) | |||
|
446 | ui.write(_('available colors:\n')) | |||
|
447 | for label, colors in _styles.items(): | |||
|
448 | ui.write(('%s\n') % colors, label=label) | |||
|
449 | ||||
427 | if os.name != 'nt': |
|
450 | if os.name != 'nt': | |
428 | w32effects = None |
|
451 | w32effects = None | |
429 | else: |
|
452 | else: | |
@@ -553,3 +576,8 b' else:' | |||||
553 | finally: |
|
576 | finally: | |
554 | # Explicitly reset original attributes |
|
577 | # Explicitly reset original attributes | |
555 | _kernel32.SetConsoleTextAttribute(stdout, origattr) |
|
578 | _kernel32.SetConsoleTextAttribute(stdout, origattr) | |
|
579 | ||||
|
580 | cmdtable = { | |||
|
581 | 'debugcolor': | |||
|
582 | (debugcolor, [], ('hg debugcolor')) | |||
|
583 | } |
@@ -63,13 +63,13 b' class converter_source(object):' | |||||
63 |
|
63 | |||
64 | self.encoding = 'utf-8' |
|
64 | self.encoding = 'utf-8' | |
65 |
|
65 | |||
66 | def checkhexformat(self, revstr): |
|
66 | def checkhexformat(self, revstr, mapname='splicemap'): | |
67 | """ fails if revstr is not a 40 byte hex. mercurial and git both uses |
|
67 | """ fails if revstr is not a 40 byte hex. mercurial and git both uses | |
68 | such format for their revision numbering |
|
68 | such format for their revision numbering | |
69 | """ |
|
69 | """ | |
70 | if not re.match(r'[0-9a-fA-F]{40,40}$', revstr): |
|
70 | if not re.match(r'[0-9a-fA-F]{40,40}$', revstr): | |
71 | raise util.Abort(_('s
|
71 | raise util.Abort(_('%s entry %s is not a valid revision' | |
72 | ' identifier') % revstr) |
|
72 | ' identifier') % (mapname, revstr)) | |
73 |
|
73 | |||
74 | def before(self): |
|
74 | def before(self): | |
75 | pass |
|
75 | pass | |
@@ -172,7 +172,7 b' class converter_source(object):' | |||||
172 | """ |
|
172 | """ | |
173 | return {} |
|
173 | return {} | |
174 |
|
174 | |||
175 | def checkrevformat(self, revstr): |
|
175 | def checkrevformat(self, revstr, mapname='splicemap'): | |
176 | """revstr is a string that describes a revision in the given |
|
176 | """revstr is a string that describes a revision in the given | |
177 | source control system. Return true if revstr has correct |
|
177 | source control system. Return true if revstr has correct | |
178 | format. |
|
178 | format. | |
@@ -192,10 +192,6 b' class converter_sink(object):' | |||||
192 | self.path = path |
|
192 | self.path = path | |
193 | self.created = [] |
|
193 | self.created = [] | |
194 |
|
194 | |||
195 | def getheads(self): |
|
|||
196 | """Return a list of this repository's heads""" |
|
|||
197 | raise NotImplementedError |
|
|||
198 |
|
||||
199 | def revmapfile(self): |
|
195 | def revmapfile(self): | |
200 | """Path to a file that will contain lines |
|
196 | """Path to a file that will contain lines | |
201 | source_rev_id sink_rev_id |
|
197 | source_rev_id sink_rev_id |
@@ -297,7 +297,7 b' class convert_git(converter_source):' | |||||
297 |
|
297 | |||
298 | return bookmarks |
|
298 | return bookmarks | |
299 |
|
299 | |||
300 | def checkrevformat(self, revstr): |
|
300 | def checkrevformat(self, revstr, mapname='splicemap'): | |
301 | """ git revision string is a 40 byte hex """ |
|
301 | """ git revision string is a 40 byte hex """ | |
302 | self.checkhexformat(revstr) |
|
302 | self.checkhexformat(revstr, mapname) | |
303 |
|
303 |
@@ -25,6 +25,9 b' from mercurial import hg, util, context,' | |||||
25 |
|
25 | |||
26 | from common import NoRepo, commit, converter_source, converter_sink |
|
26 | from common import NoRepo, commit, converter_source, converter_sink | |
27 |
|
27 | |||
|
28 | import re | |||
|
29 | sha1re = re.compile(r'\b[0-9a-f]{6,40}\b') | |||
|
30 | ||||
28 | class mercurial_sink(converter_sink): |
|
31 | class mercurial_sink(converter_sink): | |
29 | def __init__(self, ui, path): |
|
32 | def __init__(self, ui, path): | |
30 | converter_sink.__init__(self, ui, path) |
|
33 | converter_sink.__init__(self, ui, path) | |
@@ -75,10 +78,6 b' class mercurial_sink(converter_sink):' | |||||
75 | def authorfile(self): |
|
78 | def authorfile(self): | |
76 | return self.repo.join("authormap") |
|
79 | return self.repo.join("authormap") | |
77 |
|
80 | |||
78 | def getheads(self): |
|
|||
79 | h = self.repo.changelog.heads() |
|
|||
80 | return [hex(x) for x in h] |
|
|||
81 |
|
||||
82 | def setbranch(self, branch, pbranches): |
|
81 | def setbranch(self, branch, pbranches): | |
83 | if not self.clonebranches: |
|
82 | if not self.clonebranches: | |
84 | return |
|
83 | return | |
@@ -157,6 +156,14 b' class mercurial_sink(converter_sink):' | |||||
157 | p2 = parents.pop(0) |
|
156 | p2 = parents.pop(0) | |
158 |
|
157 | |||
159 | text = commit.desc |
|
158 | text = commit.desc | |
|
159 | ||||
|
160 | sha1s = re.findall(sha1re, text) | |||
|
161 | for sha1 in sha1s: | |||
|
162 | oldrev = source.lookuprev(sha1) | |||
|
163 | newrev = revmap.get(oldrev) | |||
|
164 | if newrev is not None: | |||
|
165 | text = text.replace(sha1, newrev[:len(sha1)]) | |||
|
166 | ||||
160 | extra = commit.extra.copy() |
|
167 | extra = commit.extra.copy() | |
161 | if self.branchnames and commit.branch: |
|
168 | if self.branchnames and commit.branch: | |
162 | extra['branch'] = commit.branch |
|
169 | extra['branch'] = commit.branch | |
@@ -190,14 +197,36 b' class mercurial_sink(converter_sink):'
             parentctx = None
             tagparent = nullid
 
-        try:
-            oldlines = sorted(parentctx['.hgtags'].data().splitlines(True))
-        except Exception:
-            oldlines = []
+        oldlines = set()
+        for branch, heads in self.repo.branchmap().iteritems():
+            for h in heads:
+                if '.hgtags' in self.repo[h]:
+                    oldlines.update(
+                        set(self.repo[h]['.hgtags'].data().splitlines(True)))
+        oldlines = sorted(list(oldlines))
 
         newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
         if newlines == oldlines:
             return None, None
+
+        # if the old and new tags match, then there is nothing to update
+        oldtags = set()
+        newtags = set()
+        for line in oldlines:
+            s = line.strip().split(' ', 1)
+            if len(s) != 2:
+                continue
+            oldtags.add(s[1])
+        for line in newlines:
+            s = line.strip().split(' ', 1)
+            if len(s) != 2:
+                continue
+            if s[1] not in oldtags:
+                newtags.add(s[1].strip())
+
+        if not newtags:
+            return None, None
+
         data = "".join(newlines)
         def getfilectx(repo, memctx, f):
             return context.memfilectx(f, data, False, False, None)
@@ -412,6 +441,6 b' class mercurial_source(converter_source)'
     def getbookmarks(self):
         return bookmarks.listbookmarks(self.repo)
 
-    def checkrevformat(self, revstr):
+    def checkrevformat(self, revstr, mapname='splicemap'):
         """ Mercurial, revision string is a 40 byte hex """
-        self.checkhexformat(revstr)
+        self.checkhexformat(revstr, mapname)
@@ -41,13 +41,30 b' class SvnPathNotFound(Exception):'
     pass
 
 def revsplit(rev):
-    """Parse a revision string and return (uuid, path, revnum)."""
-    url, revnum = rev.rsplit('@', 1)
-    parts = url.split('/', 1)
+    """Parse a revision string and return (uuid, path, revnum).
+    >>> revsplit('svn:a2147622-4a9f-4db4-a8d3-13562ff547b2'
+    ...          '/proj%20B/mytrunk/mytrunk@1')
+    ('a2147622-4a9f-4db4-a8d3-13562ff547b2', '/proj%20B/mytrunk/mytrunk', 1)
+    >>> revsplit('svn:8af66a51-67f5-4354-b62c-98d67cc7be1d@1')
+    ('', '', 1)
+    >>> revsplit('@7')
+    ('', '', 7)
+    >>> revsplit('7')
+    ('', '', 0)
+    >>> revsplit('bad')
+    ('', '', 0)
+    """
+    parts = rev.rsplit('@', 1)
+    revnum = 0
+    if len(parts) > 1:
+        revnum = int(parts[1])
+    parts = parts[0].split('/', 1)
+    uuid = ''
     mod = ''
-    if len(parts) > 1:
+    if len(parts) > 1 and parts[0].startswith('svn:'):
+        uuid = parts[0][4:]
         mod = '/' + parts[1]
-    return parts[0][4:], mod, int(revnum)
+    return uuid, mod, revnum
 
 def quote(s):
     # As of svn 1.7, many svn calls expect "canonical" paths. In
@@ -157,6 +174,30 b' class logstream(object):'
             self._stdout.close()
             self._stdout = None
 
+class directlogstream(list):
+    """Direct revision log iterator.
+    This can be used for debugging and development but it will probably leak
+    memory and is not suitable for real conversions."""
+    def __init__(self, url, paths, start, end, limit=0,
+                 discover_changed_paths=True, strict_node_history=False):
+
+        def receiver(orig_paths, revnum, author, date, message, pool):
+            paths = {}
+            if orig_paths is not None:
+                for k, v in orig_paths.iteritems():
+                    paths[k] = changedpath(v)
+            self.append((paths, revnum, author, date, message))
+
+        # Use an ra of our own so that our parent can consume
+        # our results without confusing the server.
+        t = transport.SvnRaTransport(url=url)
+        svn.ra.get_log(t.ra, paths, start, end, limit,
+                       discover_changed_paths,
+                       strict_node_history,
+                       receiver)
+
+    def close(self):
+        pass
+
 
 # Check to see if the given path is a local Subversion repo. Verify this by
 # looking for several svn-specific files and directories in the given
@@ -454,13 +495,13 b' class svn_source(converter_source):'
             del self.commits[rev]
         return commit
 
-    def checkrevformat(self, revstr):
+    def checkrevformat(self, revstr, mapname='splicemap'):
         """ fails if revision format does not match the correct format"""
         if not re.match(r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
                         '[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
                         '{12,12}(.*)\@[0-9]+$',revstr):
-            raise util.Abort(_('splicemap entry %s is not a valid revision'
-                               ' identifier') % revstr)
+            raise util.Abort(_('%s entry %s is not a valid revision'
+                               ' identifier') % (mapname, revstr))
 
     def gettags(self):
         tags = {}
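
Note: the pattern above expects a Subversion identifier of the form
`svn:<uuid><path>@<revnum>`. A quick hedged self-test of that expectation
(the sample UUID is invented):

    import re

    svnrevre = re.compile(r'svn:[0-9a-f]{8,8}-[0-9a-f]{4,4}-'
                          '[0-9a-f]{4,4}-[0-9a-f]{4,4}-[0-9a-f]'
                          '{12,12}(.*)\@[0-9]+$')

    # a UUID, an optional module path and a numeric revision all match
    assert svnrevre.match('svn:a2147622-4a9f-4db4-a8d3-13562ff547b2/trunk@13')
    # a bare hash, as git or Mercurial would use, does not
    assert not svnrevre.match('deadbeefdeadbeefdeadbeefdeadbeefdeadbeef')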
@@ -975,6 +1016,9 b' class svn_source(converter_source):'
             relpaths.append(p.strip('/'))
         args = [self.baseurl, relpaths, start, end, limit,
                 discover_changed_paths, strict_node_history]
+        # undocumented feature: debugsvnlog can be disabled
+        if not self.ui.configbool('convert', 'svn.debugsvnlog', True):
+            return directlogstream(*args)
         arg = encodeargs(args)
         hgexe = util.hgexecutable()
         cmd = '%s debugsvnlog' % util.shellquote(hgexe)
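
Note: the 'undocumented feature' above means the extra `hg debugsvnlog` child
process can be skipped entirely, falling back to the in-process directlogstream.
Assuming the stock convert command line, something like:

    $ hg --config convert.svn.debugsvnlog=0 convert svn-repo hg-repo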
@@ -151,7 +151,7 b' class eolfile(object):'
         self.cfg = config.config()
         # Our files should not be touched. The pattern must be
         # inserted first override a '** = native' pattern.
-        self.cfg.set('patterns', '.hg*', 'BIN')
+        self.cfg.set('patterns', '.hg*', 'BIN', 'eol')
         # We can then parse the user's patterns.
         self.cfg.parse('.hgeol', data)
 
@@ -176,14 +176,14 b' class eolfile(object):'
         for pattern, style in self.cfg.items('patterns'):
             key = style.upper()
             try:
-                ui.setconfig('decode', pattern, self._decode[key])
-                ui.setconfig('encode', pattern, self._encode[key])
+                ui.setconfig('decode', pattern, self._decode[key], 'eol')
+                ui.setconfig('encode', pattern, self._encode[key], 'eol')
             except KeyError:
                 ui.warn(_("ignoring unknown EOL style '%s' from %s\n")
                         % (style, self.cfg.source('patterns', pattern)))
         # eol.only-consistent can be specified in ~/.hgrc or .hgeol
         for k, v in self.cfg.items('eol'):
-            ui.setconfig('eol', k, v)
+            ui.setconfig('eol', k, v, 'eol')
 
     def checkrev(self, repo, ctx, files):
         failed = []
@@ -261,7 +261,7 b' def preupdate(ui, repo, hooktype, parent'
         return False
 
 def uisetup(ui):
-    ui.setconfig('hooks', 'preupdate.eol', preupdate)
+    ui.setconfig('hooks', 'preupdate.eol', preupdate, 'eol')
 
 def extsetup(ui):
     try:
@@ -280,7 +280,7 b' def reposetup(ui, repo):'
     for name, fn in filters.iteritems():
         repo.adddatafilter(name, fn)
 
-    ui.setconfig('patch', 'eol', 'auto')
+    ui.setconfig('patch', 'eol', 'auto', 'eol')
 
 class eolrepo(repo.__class__):
 
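
Note: every change in this file just threads a fourth `source` argument ('eol')
through the config calls, so overridden values can be traced back to the
extension. A hedged sketch of the effect, assuming a ui object and the usual
configsource API:

    ui.setconfig('patch', 'eol', 'auto', 'eol')
    # the override is now attributed to the extension instead of being
    # anonymous, which debugging output can report:
    assert ui.configsource('patch', 'eol') == 'eol'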
@@ -207,10 +207,10 b' def dodiff(ui, repo, diffcmd, diffopts, '
     # Function to quote file/dir names in the argument string.
     # When not operating in 3-way mode, an empty string is
     # returned for parent2
-    replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
-                   plabel1=label1a, plabel2=label1b,
-                   clabel=label2, child=dir2,
-                   root=repo.root)
+    replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
+               'plabel1': label1a, 'plabel2': label1b,
+               'clabel': label2, 'child': dir2,
+               'root': repo.root}
     def quote(match):
         key = match.group()[1:]
         if not do3way and key == 'parent2':
@@ -316,7 +316,7 b' use %(path)s to diff repository (or sele'
     that revision is compared to the working directory, and, when no
     revisions are specified, the working directory files are compared
     to its parent.\
-''') % dict(path=util.uirepr(path))
+''') % {'path': util.uirepr(path)}
 
     # We must translate the docstring right away since it is
     # used as a format string. The string will unfortunately
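
Note: both hunks above replace `dict(key=value)` calls with plain dict literals;
the first dict drives a small `$key` template expansion in the diff command
line. A hedged standalone sketch of that mechanism (paths and the key set are
simplified):

    import re
    import pipes

    replace = {'parent': '/tmp/a', 'child': '/tmp/b', 'root': '/repo'}

    def expand(args):
        def quote(match):
            # drop the leading '$' and shell-quote the substituted path
            return pipes.quote(replace[match.group()[1:]])
        return re.sub(r'\$(parent|child|root)\b', quote, args)

    print(expand('diff -r $parent $child'))  # diff -r /tmp/a /tmp/b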
@@ -202,8 +202,7 b' class hgcia(object):'
         template = self.diffstat and self.dstemplate or self.deftemplate
         template = templater.parsestring(template, quoted=False)
         t = cmdutil.changeset_templater(self.ui, self.repo, False, None,
-                                        style, False)
-        t.use_template(template)
+                                        template, style, False)
         self.templater = t
 
     def strip(self, path):
@@ -30,10 +30,12 b' file open in your editor::'
 
  # Edit history between c561b4e977df and 7c2fd3b9020c
  #
+ # Commits are listed from least to most recent
+ #
  # Commands:
  #  p, pick = use commit
  #  e, edit = use commit, but stop for amending
- #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+ #  f, fold = use commit, but combine it with the one above
  #  d, drop = remove commit from history
  #  m, mess = edit message without changing commit content
  #
@@ -49,10 +51,12 b' would reorganize the file to look like t'
 
  # Edit history between c561b4e977df and 7c2fd3b9020c
  #
+ # Commits are listed from least to most recent
+ #
  # Commands:
  #  p, pick = use commit
  #  e, edit = use commit, but stop for amending
- #  f, fold = use commit, but fold into previous commit (combines N and N-1)
+ #  f, fold = use commit, but combine it with the one above
  #  d, drop = remove commit from history
  #  m, mess = edit message without changing commit content
  #
@@ -152,10 +156,8 b' from mercurial import error'
 from mercurial import copies
 from mercurial import context
 from mercurial import hg
-from mercurial import lock as lockmod
 from mercurial import node
 from mercurial import repair
-from mercurial import scmutil
 from mercurial import util
 from mercurial import obsolete
 from mercurial import merge as mergemod
@@ -170,10 +172,12 b" testedwith = 'internal'"
 # i18n: command names and abbreviations must remain untranslated
 editcomment = _("""# Edit history between %s and %s
 #
+# Commits are listed from least to most recent
+#
 # Commands:
 #  p, pick = use commit
 #  e, edit = use commit, but stop for amending
-#  f, fold = use commit, but fold into previous commit (combines N and N-1)
+#  f, fold = use commit, but combine it with the one above
 #  d, drop = remove commit from history
 #  m, mess = edit message without changing commit content
 #
@@ -193,7 +197,8 b' def commitfuncfor(repo, src):'
     def commitfunc(**kwargs):
         phasebackup = repo.ui.backupconfig('phases', 'new-commit')
         try:
-            repo.ui.setconfig('phases', 'new-commit', phasemin)
+            repo.ui.setconfig('phases', 'new-commit', phasemin,
+                              'histedit')
             extra = kwargs.get('extra', {}).copy()
             extra['histedit_source'] = src.hex()
             kwargs['extra'] = extra
@@ -215,11 +220,12 b' def applychanges(ui, repo, ctx, opts):'
     else:
         try:
             # ui.forcemerge is an internal variable, do not document
-            repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+            repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                              'histedit')
             stats = mergemod.update(repo, ctx.node(), True, True, False,
                                     ctx.p1().node())
         finally:
-            repo.ui.setconfig('ui', 'forcemerge', '')
+            repo.ui.setconfig('ui', 'forcemerge', '', 'histedit')
     repo.setparents(wcpar, node.nullid)
     repo.dirstate.write()
     # fix up dirstate for copies and renames
@@ -370,7 +376,7 b' def finishfold(ui, repo, ctx, oldctx, ne'
     phasebackup = repo.ui.backupconfig('phases', 'new-commit')
     try:
         phasemin = max(ctx.phase(), oldctx.phase())
-        repo.ui.setconfig('phases', 'new-commit', phasemin)
+        repo.ui.setconfig('phases', 'new-commit', phasemin, 'histedit')
         n = collapse(repo, ctx, repo[newnode], commitopts)
     finally:
         repo.ui.restoreconfig(phasebackup)
@@ -562,8 +568,11 b' def _histedit(ui, repo, *freeargs, **opt'
             remote = None
         root = findoutgoing(ui, repo, remote, force, opts)
     else:
-        root = revs[0]
-        root = scmutil.revsingle(repo, root).node()
+        rootrevs = list(repo.set('roots(%lr)', revs))
+        if len(rootrevs) != 1:
+            raise util.Abort(_('The specified revisions must have ' +
+                'exactly one common root'))
+        root = rootrevs[0].node()
 
     keep = opts.get('keep', False)
     revs = between(repo, root, topmost, keep)
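
Note: `%lr` in the revset above interpolates a Python list of revision
specifiers, so the check amounts to `roots(rev1 or rev2 or ...)`. A hedged
sketch of the validation it performs, assuming a repo object with the usual
revset API:

    # a linear run such as '3::7' has exactly one root; scattered
    # revisions would produce several and are rejected
    rootrevs = list(repo.set('roots(%lr)', ['3::7']))
    assert len(rootrevs) == 1
    root = rootrevs[0].node()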
@@ -643,23 +652,28 b' def _histedit(ui, repo, *freeargs, **opt'
     if os.path.exists(repo.sjoin('undo')):
         os.unlink(repo.sjoin('undo'))
 
-
-def bootstrapcontinue(ui, repo, parentctx, rules, opts):
-    action, currentnode = rules.pop(0)
-    ctx = repo[currentnode]
+def gatherchildren(repo, ctx):
     # is there any new commit between the expected parent and "."
     #
     # note: does not take non linear new change in account (but previous
     # implementation didn't used them anyway (issue3655)
-    newchildren = [c.node() for c in repo.set('(%d::.)', parentctx)]
-    if parentctx.node() != node.nullid:
+    newchildren = [c.node() for c in repo.set('(%d::.)', ctx)]
+    if ctx.node() != node.nullid:
         if not newchildren:
-            # `parentctx` should match but no result. This means that
-            # currentnode is not a descendant from parentctx.
+            # `ctx` should match but no result. This means that
+            # currentnode is not a descendant from ctx.
             msg = _('%s is not an ancestor of working directory')
             hint = _('use "histedit --abort" to clear broken state')
-            raise util.Abort(msg % parentctx, hint=hint)
-        newchildren.pop(0)  # remove parentctx
+            raise util.Abort(msg % ctx, hint=hint)
+        newchildren.pop(0)  # remove ctx
+    return newchildren
+
+def bootstrapcontinue(ui, repo, parentctx, rules, opts):
+    action, currentnode = rules.pop(0)
+    ctx = repo[currentnode]
+
+    newchildren = gatherchildren(repo, parentctx)
+
     # Commit dirty working directory if necessary
     new = None
     m, a, r, d = repo.status()[:4]
@@ -897,7 +911,7 b' def cleanupnode(ui, repo, name, nodes):'
             # This would reduce bundle overhead
             repair.strip(ui, repo, c)
     finally:
-        lockmod.release(lock)
+        release(lock)
 
 def summaryhook(ui, repo):
     if not os.path.exists(repo.join('histedit-state')):
@@ -218,9 +218,8 b' class kwtemplater(object):'
         '''Replaces keywords in data with expanded template.'''
         def kwsub(mobj):
             kw = mobj.group(1)
-            ct = cmdutil.changeset_templater(self.ui, self.repo,
-                                             False, None, '', False)
-            ct.use_template(self.templates[kw])
+            ct = cmdutil.changeset_templater(self.ui, self.repo, False, None,
+                                             self.templates[kw], '', False)
             self.ui.pushbuffer()
             ct.show(ctx, root=self.repo.root, file=path)
             ekw = templatefilters.firstline(self.ui.popbuffer())
@@ -386,10 +385,10 b' def demo(ui, repo, *args, **opts):'
     tmpdir = tempfile.mkdtemp('', 'kwdemo.')
     ui.note(_('creating temporary repository at %s\n') % tmpdir)
     repo = localrepo.localrepository(repo.baseui, tmpdir, True)
-    ui.setconfig('keyword', fn, '')
+    ui.setconfig('keyword', fn, '', 'keyword')
     svn = ui.configbool('keywordset', 'svn')
     # explicitly set keywordset for demo output
-    ui.setconfig('keywordset', 'svn', svn)
+    ui.setconfig('keywordset', 'svn', svn, 'keyword')
 
     uikwmaps = ui.configitems('keywordmaps')
     if args or opts.get('rcfile'):
@@ -420,7 +419,7 b' def demo(ui, repo, *args, **opts):'
         if uikwmaps:
             ui.status(_('\tdisabling current template maps\n'))
             for k, v in kwmaps.iteritems():
-                ui.setconfig('keywordmaps', k, v)
+                ui.setconfig('keywordmaps', k, v, 'keyword')
     else:
         ui.status(_('\n\tconfiguration using current keyword template maps\n'))
         if uikwmaps:
@@ -446,7 +445,7 b' def demo(ui, repo, *args, **opts):'
         wlock.release()
     for name, cmd in ui.configitems('hooks'):
         if name.split('.', 1)[0].find('commit') > -1:
-            repo.ui.setconfig('hooks', name, '')
+            repo.ui.setconfig('hooks', name, '', 'keyword')
     msg = _('hg keyword configuration and expansion example')
     ui.note(("hg ci -m '%s'\n" % msg))
     repo.commit(text=msg)
@@ -375,13 +375,6 b' def verifylfiles(ui, repo, all=False, co'
     store = basestore._openstore(repo)
     return store.verify(revs, contents=contents)
 
-def debugdirstate(ui, repo):
-    '''Show basic information for the largefiles dirstate'''
-    lfdirstate = lfutil.openlfdirstate(ui, repo)
-    for file_, ent in sorted(lfdirstate._map.iteritems()):
-        mode = '%3o' % (ent[1] & 0777 & ~util.umask)
-        ui.write("%c %s %10d %s\n" % (ent[0], mode, ent[2], file_))
-
 def cachelfiles(ui, repo, node, filelist=None):
     '''cachelfiles ensures that all largefiles needed by the specified revision
     are present in the repository's largefile cache.
@@ -447,6 +440,7 b' def updatelfiles(ui, repo, filelist=None'
                 if (os.path.exists(absstandin + '.orig') and
                     os.path.exists(abslfile)):
                     shutil.copyfile(abslfile, abslfile + '.orig')
+                    util.unlinkpath(absstandin + '.orig')
                 expecthash = lfutil.readstandin(repo, lfile)
                 if (expecthash != '' and
                     (not os.path.exists(abslfile) or
@@ -15,6 +15,7 b' import stat'
 
 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
 from mercurial.i18n import _
+from mercurial import node
 
 shortname = '.hglf'
 shortnameslash = shortname + '/'
@@ -105,7 +106,7 b' class largefilesdirstate(dirstate.dirsta'
         return super(largefilesdirstate, self).forget(unixpath(f))
     def normallookup(self, f):
         return super(largefilesdirstate, self).normallookup(unixpath(f))
-    def _ignore(self):
+    def _ignore(self, f):
         return False
 
 def openlfdirstate(ui, repo, create=True):
@@ -365,3 +366,25 b' def getlfilestoupdate(oldstandins, newst'
             if f[0] not in filelist:
                 filelist.append(f[0])
     return filelist
+
+def getlfilestoupload(repo, missing, addfunc):
+    for n in missing:
+        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
+        ctx = repo[n]
+        files = set(ctx.files())
+        if len(parents) == 2:
+            mc = ctx.manifest()
+            mp1 = ctx.parents()[0].manifest()
+            mp2 = ctx.parents()[1].manifest()
+            for f in mp1:
+                if f not in mc:
+                    files.add(f)
+            for f in mp2:
+                if f not in mc:
+                    files.add(f)
+            for f in mc:
+                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
+                    files.add(f)
+        for fn in files:
+            if isstandin(fn) and fn in ctx:
+                addfunc(fn, ctx[fn].data().strip())
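
Note: the new helper is callback-based, so callers decide how to collect the
standins. A minimal usage sketch, mirroring how the hooks later in this patch
consume it (the repo and outgoing objects are assumed to exist):

    toupload = set()
    lfutil.getlfilestoupload(repo, outgoing.missing,
                             lambda fn, lfhash: toupload.add(fn))
    for fn in sorted(toupload):
        print(lfutil.splitstandin(fn))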
@@ -12,7 +12,7 b' import os'
 import copy
 
 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
-        node, archival, error, merge, discovery, pathutil, revset
+        archival, merge, pathutil, revset
 from mercurial.i18n import _
 from mercurial.node import hex
 from hgext import rebase
@@ -24,9 +24,7 b' import basestore'
 # -- Utility functions: commonly/repeatedly needed functionality ---------------
 
 def installnormalfilesmatchfn(manifest):
-    '''overrides scmutil.match so that the matcher it returns will ignore all
-    largefiles'''
-    oldmatch = None # for the closure
+    '''installmatchfn with a matchfn that ignores all largefiles'''
     def overridematch(ctx, pats=[], opts={}, globbed=False,
             default='relpath'):
         match = oldmatch(ctx, pats, opts, globbed, default)
@@ -42,18 +40,36 b' def installnormalfilesmatchfn(manifest):'
     oldmatch = installmatchfn(overridematch)
 
 def installmatchfn(f):
+    '''monkey patch the scmutil module with a custom match function.
+    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
     oldmatch = scmutil.match
     setattr(f, 'oldmatch', oldmatch)
     scmutil.match = f
     return oldmatch
 
 def restorematchfn():
-    '''restores scmutil.match to what it was before installnormalfilesmatchfn
+    '''restores scmutil.match to what it was before installmatchfn
     was called. no-op if scmutil.match is its original function.
 
-    Note that n calls to installnormalfilesmatchfn will require n calls to
+    Note that n calls to installmatchfn will require n calls to
     restore matchfn to reverse'''
-    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
+    scmutil.match = getattr(scmutil.match, 'oldmatch')
+
+def installmatchandpatsfn(f):
+    oldmatchandpats = scmutil.matchandpats
+    setattr(f, 'oldmatchandpats', oldmatchandpats)
+    scmutil.matchandpats = f
+    return oldmatchandpats
+
+def restorematchandpatsfn():
+    '''restores scmutil.matchandpats to what it was before
+    installnormalfilesmatchandpatsfn was called. no-op if scmutil.matchandpats
+    is its original function.
+
+    Note that n calls to installnormalfilesmatchandpatsfn will require n calls
+    to restore matchfn to reverse'''
+    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
+                                   scmutil.matchandpats)
 
 def addlargefiles(ui, repo, *pats, **opts):
     large = opts.pop('large', None)
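
Note: as the new docstring warns, installmatchfn monkey-patches the scmutil
module at runtime, so installs and restores must pair up. A hedged sketch of
the contract (loudmatch is a made-up replacement function):

    def loudmatch(ctx, pats=[], opts={}, globbed=False, default='relpath'):
        # delegate to the saved original; installmatchfn attaches it
        # to the replacement as the 'oldmatch' attribute
        return loudmatch.oldmatch(ctx, pats, opts, globbed, default)

    installmatchfn(loudmatch)      # scmutil.match is now loudmatch
    try:
        pass                       # run code that calls scmutil.match
    finally:
        restorematchfn()           # n installs require n restores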
@@ -241,19 +257,30 b' def overridedirty(orig, repo, ignoreupda'
     repo._repo.lfstatus = False
 
 def overridelog(orig, ui, repo, *pats, **opts):
-    def overridematch(ctx, pats=[], opts={}, globbed=False,
+    def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
             default='relpath'):
         """Matcher that merges root directory with .hglf, suitable for log.
         It is still possible to match .hglf directly.
         For any listed files run log on the standin too.
         matchfn tries both the given filename and with .hglf stripped.
         """
-        match = oldmatch(ctx, pats, opts, globbed, default)
-        m = copy.copy(match)
+        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
+        m, p = copy.copy(matchandpats)
+
+        pats = set(p)
+        # TODO: handling of patterns in both cases below
+        if m._cwd:
+            back = (m._cwd.count('/') + 1) * '../'
+            pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
+        else:
+            pats.update(lfutil.standin(f) for f in p)
+
         for i in range(0, len(m._files)):
             standin = lfutil.standin(m._files[i])
             if standin in repo[ctx.node()]:
                 m._files[i] = standin
+                pats.add(standin)
+
         m._fmap = set(m._files)
         m._always = False
         origmatchfn = m.matchfn
@@ -264,14 +291,16 b' def overridelog(orig, ui, repo, *pats, *'
                 r = origmatchfn(f)
             return r
         m.matchfn = lfmatchfn
-        return m
-    oldmatch = installmatchfn(overridematch)
+
+        return m, pats
+
+    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
     try:
         repo.lfstatus = True
         return orig(ui, repo, *pats, **opts)
     finally:
         repo.lfstatus = False
-        restorematchfn()
+        restorematchandpatsfn()
 
 def overrideverify(orig, ui, repo, *pats, **opts):
     large = opts.pop('large', False)
@@ -286,7 +315,9 b' def overrideverify(orig, ui, repo, *pats'
 def overridedebugstate(orig, ui, repo, *pats, **opts):
     large = opts.pop('large', False)
     if large:
-        lfcommands.debugdirstate(ui, repo)
+        class fakerepo(object):
+            dirstate = lfutil.openlfdirstate(ui, repo)
+        orig(ui, fakerepo, *pats, **opts)
     else:
         orig(ui, repo, *pats, **opts)
 
@@ -295,15 +326,15 b' def overridedebugstate(orig, ui, repo, *'
 # will get the new files. Filemerge is also overridden so that the merge
 # will merge standins correctly.
 def overrideupdate(orig, ui, repo, *pats, **opts):
-    lfdirstate = lfutil.openlfdirstate(ui, repo)
-    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
-        False, False)
-    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
-
     # Need to lock between the standins getting updated and their
     # largefiles getting updated
     wlock = repo.wlock()
     try:
+        lfdirstate = lfutil.openlfdirstate(ui, repo)
+        s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
+            [], False, False, False)
+        (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
+
         if opts['check']:
             mod = len(modified) > 0
             for lfile in unsure:
@@ -320,9 +351,9 b' def overrideupdate(orig, ui, repo, *pats'
         if not opts['clean']:
             for lfile in unsure + modified + added:
                 lfutil.updatestandin(repo, lfutil.standin(lfile))
+        return orig(ui, repo, *pats, **opts)
     finally:
         wlock.release()
-    return orig(ui, repo, *pats, **opts)
 
 # Before starting the manifest merge, merge.updates will call
 # _checkunknown to check if there are any files in the merged-in
@@ -365,11 +396,11 b' def overridecheckunknownfile(origfn, rep'
 # Finally, the merge.applyupdates function will then take care of
 # writing the files into the working copy and lfcommands.updatelfiles
 # will update the largefiles.
-def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force,
-                          partial, acceptremote=False):
+def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
+                             partial, acceptremote, followcopies):
     overwrite = force and not branchmerge
-    actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
-                     acceptremote)
+    actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
+                     acceptremote, followcopies)
 
     if overwrite:
         return actions
@@ -420,16 +451,18 b' def overridefilemerge(origfn, repo, myno'
     if not lfutil.isstandin(orig):
         return origfn(repo, mynode, orig, fcd, fco, fca)
 
-    if not fco.cmp(fcd): # files identical?
-        return None
-
-    if repo.ui.promptchoice(
-        _('largefile %s has a merge conflict\nancestor was %s\n'
-          'keep (l)ocal %s or\ntake (o)ther %s?'
-          '$$ &Local $$ &Other') %
-          (lfutil.splitstandin(orig),
-           fca.data().strip(), fcd.data().strip(), fco.data().strip()),
-        0) == 1:
+    ahash = fca.data().strip().lower()
+    dhash = fcd.data().strip().lower()
+    ohash = fco.data().strip().lower()
+    if (ohash != ahash and
+        ohash != dhash and
+        (dhash == ahash or
+         repo.ui.promptchoice(
+             _('largefile %s has a merge conflict\nancestor was %s\n'
+               'keep (l)ocal %s or\ntake (o)ther %s?'
+               '$$ &Local $$ &Other') %
+               (lfutil.splitstandin(orig), ahash, dhash, ohash),
+             0) == 1)):
         repo.wwrite(fcd.path(), fco.data(), fco.flags())
     return 0
 
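
Note: because standins store only content hashes, the new logic can settle most
merges without prompting: take the other side when it alone changed, keep local
when the other side is unchanged, and only ask the user when both sides diverged
from the ancestor. A hedged decision-table sketch:

    def wantother(ahash, dhash, ohash, prompt):
        # ancestor / destination (local) / other hashes, as in the hunk above
        if ohash == ahash or ohash == dhash:
            return False           # other side brought nothing new
        if dhash == ahash:
            return True            # only the other side changed
        return prompt() == 1       # real conflict: ask the user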
@@ -460,9 +493,9 b' def overridecopy(orig, ui, repo, pats, o'
     # match largefiles and run it again.
     nonormalfiles = False
     nolfiles = False
+    installnormalfilesmatchfn(repo[None].manifest())
     try:
         try:
-            installnormalfilesmatchfn(repo[None].manifest())
             result = orig(ui, repo, pats, opts, rename)
         except util.Abort, e:
             if str(e) != _('no files to copy'):
@@ -487,7 +520,6 b' def overridecopy(orig, ui, repo, pats, o'
         wlock = repo.wlock()
 
         manifest = repo[None].manifest()
-        oldmatch = None # for the closure
         def overridematch(ctx, pats=[], opts={}, globbed=False,
                 default='relpath'):
             newpats = []
@@ -576,8 +608,7 b' def overridecopy(orig, ui, repo, pats, o'
 # Standins are only updated (to match the hash of largefiles) before
 # commits. Update the standins then run the original revert, changing
 # the matcher to hit standins instead of largefiles. Based on the
-# resulting standins update the largefiles. Then return the standins
-# to their proper state
+# resulting standins update the largefiles.
 def overriderevert(orig, ui, repo, *pats, **opts):
     # Because we put the standins in a bad state (by updating them)
     # and then return them to a correct state we need to lock to
@@ -594,70 +625,40 b' def overriderevert(orig, ui, repo, *pats'
             if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                 os.unlink(repo.wjoin(lfutil.standin(lfile)))
 
+        oldstandins = lfutil.getstandinsstate(repo)
+
+        def overridematch(ctx, pats=[], opts={}, globbed=False,
+                default='relpath'):
+            match = oldmatch(ctx, pats, opts, globbed, default)
+            m = copy.copy(match)
+            def tostandin(f):
+                if lfutil.standin(f) in ctx:
+                    return lfutil.standin(f)
+                elif lfutil.standin(f) in repo[None]:
+                    return None
+                return f
+            m._files = [tostandin(f) for f in m._files]
+            m._files = [f for f in m._files if f is not None]
+            m._fmap = set(m._files)
+            m._always = False
+            origmatchfn = m.matchfn
+            def matchfn(f):
+                if lfutil.isstandin(f):
+                    return (origmatchfn(lfutil.splitstandin(f)) and
+                            (f in repo[None] or f in ctx))
+                return origmatchfn(f)
+            m.matchfn = matchfn
+            return m
+        oldmatch = installmatchfn(overridematch)
         try:
-            ctx = scmutil.revsingle(repo, opts.get('rev'))
-            oldmatch = None # for the closure
-            def overridematch(ctx, pats=[], opts={}, globbed=False,
-                    default='relpath'):
-                match = oldmatch(ctx, pats, opts, globbed, default)
-                m = copy.copy(match)
-                def tostandin(f):
-                    if lfutil.standin(f) in ctx:
-                        return lfutil.standin(f)
-                    elif lfutil.standin(f) in repo[None]:
-                        return None
-                    return f
-                m._files = [tostandin(f) for f in m._files]
-                m._files = [f for f in m._files if f is not None]
-                m._fmap = set(m._files)
-                m._always = False
-                origmatchfn = m.matchfn
-                def matchfn(f):
-                    if lfutil.isstandin(f):
-                        # We need to keep track of what largefiles are being
-                        # matched so we know which ones to update later --
-                        # otherwise we accidentally revert changes to other
-                        # largefiles. This is repo-specific, so duckpunch the
-                        # repo object to keep the list of largefiles for us
-                        # later.
-                        if origmatchfn(lfutil.splitstandin(f)) and \
-                                (f in repo[None] or f in ctx):
-                            lfileslist = getattr(repo, '_lfilestoupdate', [])
-                            lfileslist.append(lfutil.splitstandin(f))
-                            repo._lfilestoupdate = lfileslist
-                            return True
-                        else:
-                            return False
-                    return origmatchfn(f)
-                m.matchfn = matchfn
-                return m
-            oldmatch = installmatchfn(overridematch)
-            scmutil.match
-            matches = overridematch(repo[None], pats, opts)
             orig(ui, repo, *pats, **opts)
         finally:
             restorematchfn()
-            lfileslist = getattr(repo, '_lfilestoupdate', [])
-            lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
-                                    printmessage=False)
 
-            # empty out the largefiles list so we start fresh next time
-            repo._lfilestoupdate = []
-            for lfile in modified:
-                if lfile in lfileslist:
-                    if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
-                            in repo['.']:
-                        lfutil.writestandin(repo, lfutil.standin(lfile),
-                            repo['.'][lfile].data().strip(),
-                            'x' in repo['.'][lfile].flags())
-            lfdirstate = lfutil.openlfdirstate(ui, repo)
-            for lfile in added:
-                standin = lfutil.standin(lfile)
-                if standin not in ctx and (standin in matches or opts.get('all')):
-                    if lfile in lfdirstate:
-                        lfdirstate.drop(lfile)
-                    util.unlinkpath(repo.wjoin(standin))
-            lfdirstate.write()
+        newstandins = lfutil.getstandinsstate(repo)
+        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
+        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False)
+
     finally:
         wlock.release()
 
@@ -752,7 +753,7 b' def pulledrevsetsymbol(repo, subset, x):'
         firstpulled = repo.firstpulled
     except AttributeError:
         raise util.Abort(_("pulled() only available in --lfrev"))
-    return [r for r in subset if r >= firstpulled]
+    return revset.baseset([r for r in subset if r >= firstpulled])
 
 def overrideclone(orig, ui, source, dest=None, **opts):
     d = dest
@@ -760,8 +761,8 b' def overrideclone(orig, ui, source, dest'
         d = hg.defaultdest(source)
     if opts.get('all_largefiles') and not hg.islocal(d):
         raise util.Abort(_(
-            '--all-largefiles is incompatible with non-local destination %s' %
-            d))
+            '--all-largefiles is incompatible with non-local destination %s') %
+            d)
 
     return orig(ui, source, dest, **opts)
 
@@ -981,62 +982,42 b' def overrideforget(orig, ui, repo, *pats'
 
     return result
 
-def getoutgoinglfiles(ui, repo, dest=None, **opts):
-    dest = ui.expandpath(dest or 'default-push', dest or 'default')
-    dest, branches = hg.parseurl(dest, opts.get('branch'))
-    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
-    if revs:
-        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
-
-    try:
-        remote = hg.peer(repo, opts, dest)
-    except error.RepoError:
-        return None
-    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
-    if not outgoing.missing:
-        return outgoing.missing
-    o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
-    if opts.get('newest_first'):
-        o.reverse()
-
-    toupload = set()
-    for n in o:
-        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
-        ctx = repo[n]
-        files = set(ctx.files())
-        if len(parents) == 2:
-            mc = ctx.manifest()
-            mp1 = ctx.parents()[0].manifest()
-            mp2 = ctx.parents()[1].manifest()
-            for f in mp1:
-                if f not in mc:
-                    files.add(f)
-            for f in mp2:
-                if f not in mc:
-                    files.add(f)
-            for f in mc:
-                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
-                    files.add(f)
-        toupload = toupload.union(
-            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
-    return sorted(toupload)
-
-def overrideoutgoing(orig, ui, repo, dest=None, **opts):
-    result = orig(ui, repo, dest, **opts)
-
+def outgoinghook(ui, repo, other, opts, missing):
     if opts.pop('large', None):
-        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
-        if toupload is None:
-            ui.status(_('largefiles: No remote repo\n'))
-        elif not toupload:
+        toupload = set()
+        lfutil.getlfilestoupload(repo, missing,
+                                 lambda fn, lfhash: toupload.add(fn))
+        if not toupload:
             ui.status(_('largefiles: no files to upload\n'))
         else:
             ui.status(_('largefiles to upload:\n'))
-            for file in toupload:
+            for file in sorted(toupload):
                 ui.status(lfutil.splitstandin(file) + '\n')
             ui.status('\n')
 
-    return result
+def summaryremotehook(ui, repo, opts, changes):
+    largeopt = opts.get('large', False)
+    if changes is None:
+        if largeopt:
+            return (False, True) # only outgoing check is needed
+        else:
+            return (False, False)
+    elif largeopt:
+        url, branch, peer, outgoing = changes[1]
+        if peer is None:
+            # i18n: column positioning for "hg summary"
+            ui.status(_('largefiles: (no remote repo)\n'))
+            return
+
+        toupload = set()
+        lfutil.getlfilestoupload(repo, outgoing.missing,
+                                 lambda fn, lfhash: toupload.add(fn))
+        if not toupload:
+            # i18n: column positioning for "hg summary"
+            ui.status(_('largefiles: (no files to upload)\n'))
+        else:
+            # i18n: column positioning for "hg summary"
+            ui.status(_('largefiles: %d to upload\n') % len(toupload))
 
 def overridesummary(orig, ui, repo, *pats, **opts):
     try:
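
Note: when called with `changes is None`, summaryremotehook acts as a
capability probe: the returned pair of booleans tells the summary machinery
whether (incoming, outgoing) data is needed, and with --large only the
outgoing half is requested. A hedged sketch of a caller's view:

    needsincoming, needsoutgoing = summaryremotehook(ui, repo,
                                                     {'large': True}, None)
    assert (needsincoming, needsoutgoing) == (False, True)
    # the summary code later invokes the hook again with the computed
    # 'changes' tuple so it can print the largefiles line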
@@ -1045,18 +1026,6 b' def overridesummary(orig, ui, repo, *pat'
     finally:
         repo.lfstatus = False
 
-    if opts.pop('large', None):
-        toupload = getoutgoinglfiles(ui, repo, None, **opts)
-        if toupload is None:
-            # i18n: column positioning for "hg summary"
-            ui.status(_('largefiles: (no remote repo)\n'))
-        elif not toupload:
-            # i18n: column positioning for "hg summary"
-            ui.status(_('largefiles: (no files to upload)\n'))
-        else:
-            # i18n: column positioning for "hg summary"
-            ui.status(_('largefiles: %d to upload\n') % len(toupload))
-
 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
                      similarity=None):
     if not lfutil.islfilesrepo(repo):
@@ -1146,22 +1115,24 b' def overridecat(orig, ui, repo, file1, *'
     m = scmutil.match(ctx, (file1,) + pats, opts)
     origmatchfn = m.matchfn
     def lfmatchfn(f):
+        if origmatchfn(f):
+            return True
         lf = lfutil.splitstandin(f)
         if lf is None:
-            return origmatchfn(f)
+            return False
         notbad.add(lf)
         return origmatchfn(lf)
     m.matchfn = lfmatchfn
     origbadfn = m.bad
     def lfbadfn(f, msg):
         if not f in notbad:
             origbadfn(f, msg)
     m.bad = lfbadfn
     for f in ctx.walk(m):
         fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                  pathname=f)
         lf = lfutil.splitstandin(f)
-        if lf is None:
+        if lf is None or origmatchfn(f):
             # duplicating unreachable code from commands.cat
             data = ctx[f].data()
             if opts.get('decode'):
@@ -8,7 +8,6 b' import urllib2'
 import re
 
 from mercurial import error, httppeer, util, wireproto
-from mercurial.wireproto import batchable, future
 from mercurial.i18n import _
 
 import lfutil
@@ -135,9 +134,9 b' def wirereposetup(ui, repo):'
             self._abort(error.ResponseError(_("unexpected response:"),
                                             chunk))
 
-        @batchable
+        @wireproto.batchable
         def statlfile(self, sha):
-            f = future()
+            f = wireproto.future()
             result = {'sha': sha}
             yield result, f
             try:
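
For context, the two edits above only switch proto.py from from-imports to module-qualified names; the batching machinery itself is unchanged. A @wireproto.batchable method is a two-step generator: it first yields the arguments to send plus a future, then yields the decoded result once the future has been filled. A minimal self-contained sketch of that calling convention (Future, batchable and fake_server are simplified stand-ins, not Mercurial's real implementation):

    # Toy model of the @wireproto.batchable calling convention (Python 2).
    class Future(object):
        value = None                    # filled in when the reply arrives

    def batchable(fn):
        def plain(*args):
            gen = fn(*args)
            req, f = gen.next()         # step 1: what to put on the wire
            f.value = fake_server(req)  # pretend round-trip to the server
            return gen.next()           # step 2: decode f.value
        return plain

    def fake_server(req):
        return '0'                      # statlfile: '0' means "file exists"

    @batchable
    def statlfile(sha):
        f = Future()
        yield {'sha': sha}, f           # the request and its future
        yield int(f.value)              # the decoded result

    print statlfile('deadbeef')         # prints 0
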
@@ -8,9 +8,8 b''
 
 import urllib2
 
-from mercurial import util
+from mercurial import util, wireproto
 from mercurial.i18n import _
-from mercurial.wireproto import remotebatch
 
 import lfutil
 import basestore
@@ -30,7 +29,8 b' class remotestore(basestore.basestore):'
                 % (source, util.hidepassword(self.url)))
 
     def exists(self, hashes):
-        return dict((h, s == 0) for (h, s) in self._stat(hashes).iteritems())
+        return dict((h, s == 0) for (h, s) in # dict-from-generator
+                    self._stat(hashes).iteritems())
 
     def sendfile(self, filename, hash):
         self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
@@ -96,5 +96,4 b' class remotestore(basestore.basestore):'
 
     def batch(self):
         '''Support for remote batching.'''
-        return remotebatch(self)
-
+        return wireproto.remotebatch(self)
@@ -10,8 +10,7 b''
 import copy
 import os
 
 from mercurial import error, manifest, match as match_, util
-from mercurial import node as node_
 from mercurial.i18n import _
 from mercurial import localrepo
 
@@ -413,37 +412,6 b' def reposetup(ui, repo):'
                           " supported in the destination:"
                           " %s") % (', '.join(sorted(missing)))
                 raise util.Abort(msg)
-
-            outgoing = discovery.findcommonoutgoing(repo, remote.peer(),
-                                                    force=force)
-            if outgoing.missing:
-                toupload = set()
-                o = self.changelog.nodesbetween(outgoing.missing, revs)[0]
-                for n in o:
-                    parents = [p for p in self.changelog.parents(n)
-                               if p != node_.nullid]
-                    ctx = self[n]
-                    files = set(ctx.files())
-                    if len(parents) == 2:
-                        mc = ctx.manifest()
-                        mp1 = ctx.parents()[0].manifest()
-                        mp2 = ctx.parents()[1].manifest()
-                        for f in mp1:
-                            if f not in mc:
-                                files.add(f)
-                        for f in mp2:
-                            if f not in mc:
-                                files.add(f)
-                        for f in mc:
-                            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
-                                                                             None):
-                                files.add(f)
-
-                    toupload = toupload.union(
-                        set([ctx[f].data().strip()
-                             for f in files
-                             if lfutil.isstandin(f) and f in ctx]))
-                lfcommands.uploadlfiles(ui, self, remote, toupload)
             return super(lfilesrepo, self).push(remote, force=force, revs=revs,
                                                 newbranch=newbranch)
 
@@ -503,11 +471,20 b' def reposetup(ui, repo):'
 
     repo.__class__ = lfilesrepo
 
+    def prepushoutgoinghook(local, remote, outgoing):
+        if outgoing.missing:
+            toupload = set()
+            addfunc = lambda fn, lfhash: toupload.add(lfhash)
+            lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
+            lfcommands.uploadlfiles(ui, local, remote, toupload)
+    repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
+
     def checkrequireslfiles(ui, repo, **kwargs):
         if 'largefiles' not in repo.requirements and util.any(
                 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
             repo.requirements.add('largefiles')
             repo._writerequirements()
 
-    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
-    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
+    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
+                 'largefiles')
+    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
@@ -9,10 +9,9 b''
 '''setup for largefiles extension: uisetup'''
 
 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
-    httppeer, merge, scmutil, sshpeer, wireproto, revset
+    httppeer, merge, scmutil, sshpeer, wireproto, revset, subrepo
 from mercurial.i18n import _
 from mercurial.hgweb import hgweb_mod, webcommands
-from mercurial.subrepo import hgsubrepo
 
 import overrides
 import proto
@@ -42,7 +41,7 b' def uisetup(ui):'
     # Subrepos call status function
     entry = extensions.wrapcommand(commands.table, 'status',
                                    overrides.overridestatus)
-    entry = extensions.wrapfunction(hgsubrepo, 'status',
+    entry = extensions.wrapfunction(subrepo.hgsubrepo, 'status',
                                     overrides.overridestatusfn)
 
     entry = extensions.wrapcommand(commands.table, 'log',
@@ -65,14 +64,16 b' def uisetup(ui):'
     debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
     entry[1].extend(debugstateopt)
 
-    entry = extensions.wrapcommand(commands.table, 'outgoing',
-                                   overrides.overrideoutgoing)
+    outgoing = lambda orgfunc, *arg, **kwargs: orgfunc(*arg, **kwargs)
+    entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
     outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
     entry[1].extend(outgoingopt)
+    cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
     entry = extensions.wrapcommand(commands.table, 'summary',
                                    overrides.overridesummary)
     summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
     entry[1].extend(summaryopt)
+    cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
 
     entry = extensions.wrapcommand(commands.table, 'update',
                                    overrides.overrideupdate)
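
Note that the new outgoing wrapper is deliberately a no-op lambda: it exists only so the --large option can be attached to the command table entry, while the real work moves into cmdutil.outgoinghooks. The wrapfunction pattern used throughout this file boils down to replacing an attribute with a closure that receives the original as its first argument; a minimal sketch (namespace and the sample functions are illustrative, not hg's API):

    # Minimal model of extensions.wrapfunction (Python 2).
    def wrapfunction(container, funcname, wrapper):
        origfn = getattr(container, funcname)
        def wrap(*args, **kwargs):
            return wrapper(origfn, *args, **kwargs)
        setattr(container, funcname, wrap)
        return origfn

    class namespace(object):            # stand-in for a module
        pass
    mod = namespace()
    mod.status = lambda path: 'status of %s' % path

    def overridestatus(orig, path):
        return '[largefiles] ' + orig(path)

    wrapfunction(mod, 'status', overridestatus)
    print mod.status('repo')            # [largefiles] status of repo
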
@@ -96,15 +97,15 b' def uisetup(ui):'
                                     overrides.overridecat)
     entry = extensions.wrapfunction(merge, '_checkunknownfile',
                                     overrides.overridecheckunknownfile)
-    entry = extensions.wrapfunction(merge, 'manifestmerge',
-                                    overrides.overridemanifestmerge)
+    entry = extensions.wrapfunction(merge, 'calculateupdates',
+                                    overrides.overridecalculateupdates)
     entry = extensions.wrapfunction(filemerge, 'filemerge',
                                     overrides.overridefilemerge)
     entry = extensions.wrapfunction(cmdutil, 'copy',
                                     overrides.overridecopy)
 
     # Summary calls dirty on the subrepos
-    entry = extensions.wrapfunction(hgsubrepo, 'dirty',
+    entry = extensions.wrapfunction(subrepo.hgsubrepo, 'dirty',
                                     overrides.overridedirty)
 
     # Backout calls revert so we need to override both the command and the
@@ -118,7 +119,8 b' def uisetup(ui):'
     extensions.wrapfunction(hg, 'merge', overrides.hgmerge)
 
     extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
-    extensions.wrapfunction(hgsubrepo, 'archive', overrides.hgsubrepoarchive)
+    extensions.wrapfunction(subrepo.hgsubrepo, 'archive',
+                            overrides.hgsubrepoarchive)
     extensions.wrapfunction(cmdutil, 'bailifchanged',
                             overrides.overridebailifchanged)
 
@@ -304,7 +304,7 b' def newcommit(repo, phase, *args, **kwar'
     backup = repo.ui.backupconfig('phases', 'new-commit')
     try:
         if phase is not None:
-            repo.ui.setconfig('phases', 'new-commit', phase)
+            repo.ui.setconfig('phases', 'new-commit', phase, 'mq')
         return repo.commit(*args, **kwargs)
     finally:
         if phase is not None:
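
The extra 'mq' argument is the first instance of a mechanical change repeated in the pager, patchbomb, rebase and largefiles hunks below: ui.setconfig grows a trailing source string recording which component set the value, so the origin of each override can later be reported. A toy model of the idea (fakeui is hypothetical; the real ui object stores far more):

    # Sketch: recording the source of each config override (Python 2).
    class fakeui(object):
        def __init__(self):
            self._cfg = {}
        def setconfig(self, section, name, value, source=''):
            self._cfg[(section, name)] = (value, source)
        def configsource(self, section, name):
            return self._cfg[(section, name)][1]

    ui = fakeui()
    ui.setconfig('phases', 'new-commit', 'secret', 'mq')
    print ui.configsource('phases', 'new-commit')   # mq
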
@@ -826,10 +826,10 b' class queue(object):'
             repo.setparents(p1, merge)
 
         if all_files and '.hgsubstate' in all_files:
             wctx = repo[None]
+            pctx = repo['.']
             overwrite = False
-            mergedsubstate = subrepo.submerge(repo, wctx, wctx, wctx,
+            mergedsubstate = subrepo.submerge(repo, pctx, wctx, wctx,
                                               overwrite)
             files += mergedsubstate.keys()
 
@@ -1035,11 +1035,8 b' class queue(object):'
         self.checkpatchname(patchfn)
         inclsubs = checksubstate(repo)
         if inclsubs:
-            inclsubs.append('.hgsubstate')
             substatestate = repo.dirstate['.hgsubstate']
         if opts.get('include') or opts.get('exclude') or pats:
-            if inclsubs:
-                pats = list(pats or []) + inclsubs
             match = scmutil.match(repo[None], pats, opts)
             # detect missing files in pats
             def badfn(f, msg):
@@ -1047,14 +1044,14 b' class queue(object):'
                 raise util.Abort('%s: %s' % (f, msg))
             match.bad = badfn
             changes = repo.status(match=match)
-            m, a, r, d = changes[:4]
         else:
             changes = self.checklocalchanges(repo, force=True)
-            m, a, r, d = changes
-        match = scmutil.matchfiles(repo, m + a + r + inclsubs)
+        commitfiles = list(inclsubs)
+        for files in changes[:3]:
+            commitfiles.extend(files)
+        match = scmutil.matchfiles(repo, commitfiles)
         if len(repo[None].parents()) > 1:
             raise util.Abort(_('cannot manage merge changesets'))
-        commitfiles = m + a + r
         self.checktoppatch(repo)
         insert = self.fullseriesend()
         wlock = repo.wlock()
@@ -1494,7 +1491,6 b' class queue(object):'
 
         inclsubs = checksubstate(repo, hex(patchparent))
         if inclsubs:
-            inclsubs.append('.hgsubstate')
             substatestate = repo.dirstate['.hgsubstate']
 
         ph = patchheader(self.join(patchfn), self.plainmode)
@@ -1987,9 +1983,11 b' class queue(object):'
             raise util.Abort(_('-e is incompatible with import from -'))
         filename = normname(filename)
         self.checkreservedname(filename)
-        originpath = self.join(filename)
-        if not os.path.isfile(originpath):
-            raise util.Abort(_("patch %s does not exist") % filename)
+        if util.url(filename).islocal():
+            originpath = self.join(filename)
+            if not os.path.isfile(originpath):
+                raise util.Abort(
+                    _("patch %s does not exist") % filename)
 
         if patchname:
             self.checkpatchname(patchname, force)
@@ -3269,6 +3267,12 b' def reposetup(ui, repo):'
         def mq(self):
             return queue(self.ui, self.baseui, self.path)
 
+        def invalidateall(self):
+            super(mqrepo, self).invalidateall()
+            if localrepo.hasunfilteredcache(self, 'mq'):
+                # recreate mq in case queue path was changed
+                delattr(self.unfiltered(), 'mq')
+
         def abortifwdirpatched(self, errmsg, force=False):
             if self.mq.applied and self.mq.checkapplied and not force:
                 parents = self.dirstate.parents()
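
The delattr trick works because repo.mq is a cached property: the descriptor stores its result in the instance dict, and deleting that entry forces recomputation on the next access. A bare-bones version of the underlying pattern, comparable to hg's propertycache (names here are illustrative):

    # Minimal cached-property descriptor (Python 2); deleting the
    # attribute invalidates the cache.
    class propertycache(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, type=None):
            result = self.func(obj)
            obj.__dict__[self.name] = result    # shadows the descriptor
            return result

    class repo(object):
        @propertycache
        def mq(self):
            print 'creating queue'
            return object()

    r = repo()
    r.mq                 # prints 'creating queue'
    r.mq                 # cached, prints nothing
    delattr(r, 'mq')     # invalidate
    r.mq                 # prints 'creating queue' again
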
@@ -3285,14 +3289,14 b' def reposetup(ui, repo):'
             return super(mqrepo, self).commit(text, user, date, match, force,
                                               editor, extra)
 
-        def checkpush(self, force, revs):
-            if self.mq.applied and self.mq.checkapplied and not force:
+        def checkpush(self, pushop):
+            if self.mq.applied and self.mq.checkapplied and not pushop.force:
                 outapplied = [e.node for e in self.mq.applied]
-                if revs:
+                if pushop.revs:
                     # Assume applied patches have no non-patch descendants and
                     # are not on remote already. Filtering any changeset not
                     # pushed.
-                    heads = set(revs)
+                    heads = set(pushop.revs)
                     for node in reversed(outapplied):
                         if node in heads:
                             break
@@ -3303,7 +3307,7 b' def reposetup(ui, repo):'
                 if self[node].phase() < phases.secret:
                     raise util.Abort(_('source has mq patches applied'))
                 # no non-secret patches pushed
-            super(mqrepo, self).checkpush(force, revs)
+            super(mqrepo, self).checkpush(pushop)
 
         def _findtags(self):
             '''augment tags from base class with patch tags'''
@@ -3409,7 +3413,7 b' def revsetmq(repo, subset, x):'
     """
     revset.getargs(x, 0, 0, _("mq takes no arguments"))
     applied = set([repo[r.node].rev() for r in repo.mq.applied])
-    return [r for r in subset if r in applied]
+    return revset.baseset([r for r in subset if r in applied])
 
 # tell hggettext to extract docstrings from these functions:
 i18nfunctions = [revsetmq]
@@ -188,13 +188,12 b' class notifier(object):'
         mapfile = self.ui.config('notify', 'style')
         template = (self.ui.config('notify', hooktype) or
                     self.ui.config('notify', 'template'))
-        self.t = cmdutil.changeset_templater(self.ui, self.repo,
-                                             False, None, mapfile, False)
         if not mapfile and not template:
             template = deftemplates.get(hooktype) or single_template
         if template:
             template = templater.parsestring(template, quoted=False)
-            self.t.use_template(template)
+        self.t = cmdutil.changeset_templater(self.ui, self.repo, False, None,
+                                             template, mapfile, False)
 
     def strip(self, path):
         '''strip leading slashes from local path, turn into web-safe path.'''
@@ -129,8 +129,8 b' def uisetup(ui):'
         if (always or auto and
             (cmd in attend or
              (cmd not in ignore and not attend))):
-            ui.setconfig('ui', 'formatted', ui.formatted())
-            ui.setconfig('ui', 'interactive', False)
+            ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
+            ui.setconfig('ui', 'interactive', False, 'pager')
             if util.safehasattr(signal, "SIGPIPE"):
                 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
             _runpager(ui, p)
@@ -291,7 +291,11 b' def patchbomb(ui, repo, *revs, **opts):'
             return [str(r) for r in revs]
 
     def getpatches(revs):
+        prev = repo['.'].rev()
         for r in scmutil.revrange(repo, revs):
+            if r == prev and (repo[None].files() or repo[None].deleted()):
+                ui.warn(_('warning: working directory has '
+                          'uncommitted changes\n'))
             output = cStringIO.StringIO()
             cmdutil.export(repo, [r], fp=output,
                            opts=patch.diffopts(ui, opts))
@@ -546,11 +550,11 b' def patchbomb(ui, repo, *revs, **opts):'
         if not sendmail:
             verifycert = ui.config('smtp', 'verifycert')
             if opts.get('insecure'):
-                ui.setconfig('smtp', 'verifycert', 'loose')
+                ui.setconfig('smtp', 'verifycert', 'loose', 'patchbomb')
             try:
                 sendmail = mail.connect(ui, mbox=mbox)
             finally:
-                ui.setconfig('smtp', 'verifycert', verifycert)
+                ui.setconfig('smtp', 'verifycert', verifycert, 'patchbomb')
         ui.status(_('sending '), subj, ' ...\n')
         ui.progress(_('sending'), i, item=subj, total=len(msgs))
         if not mbox:
@@ -289,6 +289,9 b' def rebase(ui, repo, **opts):'
                                             inclusive=True)
             external = externalparent(repo, state, targetancestors)
 
+            if dest.closesbranch() and not keepbranchesf:
+                ui.status(_('reopening closed branch head %s\n') % dest)
+
         if keepbranchesf:
             # insert _savebranch at the start of extrafns so if
             # there's a user-provided extrafn it can clobber branch if
@@ -330,14 +333,15 b' def rebase(ui, repo, **opts):'
                 repo.ui.debug('resuming interrupted rebase\n')
             else:
                 try:
-                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                                 'rebase')
                     stats = rebasenode(repo, rev, p1, state, collapsef)
                     if stats and stats[3] > 0:
                         raise error.InterventionRequired(
                             _('unresolved conflicts (see hg '
                               'resolve, then hg rebase --continue)'))
                 finally:
-                    ui.setconfig('ui', 'forcemerge', '')
+                    ui.setconfig('ui', 'forcemerge', '', 'rebase')
                 cmdutil.duplicatecopies(repo, rev, target)
                 if not collapsef:
                     newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
@@ -516,6 +520,12 b' def rebasenode(repo, rev, p1, state, col'
             if state.get(p.rev()) == repo[p1].rev():
                 base = p.node()
                 break
+        else: # fallback when base not found
+            base = None
+
+            # Raise because this function is called wrong (see issue 4106)
+            raise AssertionError('no base found to rebase on '
+                                 '(rebasenode called wrong)')
     if base is not None:
         repo.ui.debug(" detach base %d:%s\n" % (repo[base].rev(), repo[base]))
         # When collapsing in-place, the parent is the common ancestor, we
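
The new fallback relies on Python's for/else clause, which is easy to misread: the else suite runs only when the loop ends without hitting break, which is exactly the "no base found" case. In miniature:

    # for/else: the 'else' suite runs only if the loop was not
    # left via 'break'.
    for p in [1, 2, 3]:
        if p == 99:
            break
    else:
        print 'no matching parent found'   # this prints
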
@@ -703,7 +713,8 b' def restorestatus(repo):'
         if new != nullrev and new in seen:
             skipped.add(old)
         seen.add(new)
-    repo.ui.debug('computed skipped revs: %s\n' % skipped)
+    repo.ui.debug('computed skipped revs: %s\n' %
+                  (' '.join(str(r) for r in sorted(skipped)) or None))
     repo.ui.debug('rebase status resumed\n')
     return (originalwd, target, state, skipped,
             collapse, keep, keepbranches, external, activebookmark)
@@ -790,7 +801,7 b' def buildstate(repo, dest, rebaseset, co'
             repo.ui.debug('source is a child of destination\n')
             return None
 
-    repo.ui.debug('rebase onto %d starting from %s\n' % (dest, roots))
+    repo.ui.debug('rebase onto %d starting from %s\n' % (dest, root))
     state.update(dict.fromkeys(rebaseset, nullrev))
     # Rebase tries to turn <dest> into a parent of <root> while
     # preserving the number of parents of rebased changesets:
@@ -22,10 +22,10 b' shelve".'
 """
 
 from mercurial.i18n import _
-from mercurial.node import nullid, bin, hex
-from mercurial import changegroup, cmdutil, scmutil, phases
+from mercurial.node import nullid, nullrev, bin, hex
+from mercurial import changegroup, cmdutil, scmutil, phases, commands
 from mercurial import error, hg, mdiff, merge, patch, repair, util
-from mercurial import templatefilters
+from mercurial import templatefilters, changegroup, exchange
 from mercurial import lock as lockmod
 from hgext import rebase
 import errno
@@ -68,6 +68,18 b' class shelvedfile(object):'
                 raise
             raise util.Abort(_("shelved change '%s' not found") % self.name)
 
+    def applybundle(self):
+        fp = self.opener()
+        try:
+            gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
+            changegroup.addchangegroup(self.repo, gen, 'unshelve',
+                                       'bundle:' + self.vfs.join(self.fname))
+        finally:
+            fp.close()
+
+    def writebundle(self, cg):
+        changegroup.writebundle(cg, self.fname, 'HG10UN', self.vfs)
+
 class shelvedstate(object):
     """Handle persistence during unshelving operations.
 
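
The pair of methods added above makes shelvedfile the single owner of the bundle round-trip; the two call sites appear in later hunks of this changeset. Schematically, with lines taken from those hunks:

    # shelve (createcmd):
    #     cg = changegroup.changegroupsubset(repo, bases, [node], 'shelve')
    #     shelvedfile(repo, name, 'hg').writebundle(cg)
    # unshelve:
    #     shelvedfile(repo, basename, 'hg').applybundle()
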
@@ -122,22 +134,21 b' def createcmd(ui, repo, pats, opts):'
     """subcommand that creates a new shelve"""
 
     def publicancestors(ctx):
-        """Compute the heads of the public ancestors of a commit.
+        """Compute the public ancestors of a commit.
 
-        Much faster than the revset heads(::ctx - draft())"""
-        seen = set()
+        Much faster than the revset ancestors(ctx) & draft()"""
+        seen = set([nullrev])
         visit = util.deque()
         visit.append(ctx)
         while visit:
             ctx = visit.popleft()
+            yield ctx.node()
             for parent in ctx.parents():
                 rev = parent.rev()
                 if rev not in seen:
                     seen.add(rev)
                     if parent.mutable():
                         visit.append(parent)
-                    else:
-                        yield parent.node()
 
     wctx = repo[None]
     parents = wctx.parents()
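
The behavioral change is easier to see on a toy graph: the old generator yielded only the public boundary parents, while the new one yields every node it visits and simply stops walking at public ancestors. A self-contained sketch of the new traversal (plain dicts standing in for changectx objects):

    # Toy model of the fixed traversal (Python 2).
    from collections import deque

    parents = {'d2': ['d1'], 'd1': ['p1'], 'p1': ['p0'], 'p0': []}
    mutable = set(['d1', 'd2'])           # drafts; p* are public

    def publicancestors(node):
        seen = set()
        visit = deque([node])
        while visit:
            n = visit.popleft()
            yield n                       # every visited node is yielded
            for p in parents[n]:
                if p not in seen:
                    seen.add(p)
                    if p in mutable:      # only keep walking through drafts
                        visit.append(p)

    print list(publicancestors('d2'))     # ['d2', 'd1']
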
@@ -173,9 +184,9 b' def createcmd(ui, repo, pats, opts):'
         repo.mq.checkapplied = saved
 
     if parent.node() != nullid:
-        desc = parent.description().split('\n', 1)[0]
+        desc = "changes to '%s'" % parent.description().split('\n', 1)[0]
     else:
-        desc = '(empty repository)'
+        desc = '(changes in empty repository)'
 
     if not opts['message']:
         opts['message'] = desc
@@ -228,9 +239,8 b' def createcmd(ui, repo, pats, opts):'
         fp.write('\0'.join(shelvedfiles))
 
         bases = list(publicancestors(repo[node]))
-        cg = repo.changegroupsubset(bases, [node], 'shelve')
-        changegroup.writebundle(cg, shelvedfile(repo, name, 'hg').filename,
-                                'HG10UN')
+        cg = changegroup.changegroupsubset(repo, bases, [node], 'shelve')
+        shelvedfile(repo, name, 'hg').writebundle(cg)
         cmdutil.export(repo, [node],
                        fp=shelvedfile(repo, name, 'patch').opener('wb'),
                        opts=mdiff.diffopts(git=True))
@@ -459,7 +469,9 b' def unshelvecontinue(ui, repo, state, op'
           ('c', 'continue', None,
            _('continue an incomplete unshelve operation')),
           ('', 'keep', None,
-           _('keep shelve after unshelving'))],
+           _('keep shelve after unshelving')),
+          ('', 'date', '',
+           _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
          _('hg unshelve [SHELVED]'))
 def unshelve(ui, repo, *shelved, **opts):
     """restore a shelved change to the working directory
@@ -518,6 +530,7 b' def unshelve(ui, repo, *shelved, **opts)'
     if not shelvedfile(repo, basename, 'files').exists():
         raise util.Abort(_("shelved change '%s' not found") % basename)
 
+    oldquiet = ui.quiet
     wlock = lock = tr = None
     try:
         lock = repo.lock()
@@ -526,17 +539,19 b' def unshelve(ui, repo, *shelved, **opts)'
         tr = repo.transaction('unshelve', report=lambda x: None)
         oldtiprev = len(repo)
 
-        wctx = repo['.']
-        tmpwctx = wctx
+        pctx = repo['.']
+        tmpwctx = pctx
         # The goal is to have a commit structure like so:
-        # ...-> wctx -> tmpwctx -> shelvectx
+        # ...-> pctx -> tmpwctx -> shelvectx
         # where tmpwctx is an optional commit with the user's pending changes
         # and shelvectx is the unshelved changes. Then we merge it all down
-        # to the original wctx.
+        # to the original pctx.
 
         # Store pending changes in a commit
         m, a, r, d = repo.status()[:4]
         if m or a or r or d:
+            ui.status(_("temporarily committing pending changes "
+                        "(restore with 'hg unshelve --abort')\n"))
             def commitfunc(ui, repo, message, match, opts):
                 hasmq = util.safehasattr(repo, 'mq')
                 if hasmq:
@@ -551,28 +566,24 b' def unshelve(ui, repo, *shelved, **opts)'
 
             tempopts = {}
             tempopts['message'] = "pending changes temporary commit"
-            oldquiet = ui.quiet
-            try:
-                ui.quiet = True
-                node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
-            finally:
-                ui.quiet = oldquiet
+            tempopts['date'] = opts.get('date')
+            ui.quiet = True
+            node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
             tmpwctx = repo[node]
 
-        try:
-            fp = shelvedfile(repo, basename, 'hg').opener()
-            gen = changegroup.readbundle(fp, fp.name)
-            repo.addchangegroup(gen, 'unshelve', 'bundle:' + fp.name)
-            nodes = [ctx.node() for ctx in repo.set('%d:', oldtiprev)]
-            phases.retractboundary(repo, phases.secret, nodes)
-        finally:
-            fp.close()
+        ui.quiet = True
+        shelvedfile(repo, basename, 'hg').applybundle()
+        nodes = [ctx.node() for ctx in repo.set('%d:', oldtiprev)]
+        phases.retractboundary(repo, phases.secret, nodes)
+
+        ui.quiet = oldquiet
 
         shelvectx = repo['tip']
 
         # If the shelve is not immediately on top of the commit
         # we'll be merging with, rebase it to be on top.
         if tmpwctx.node() != shelvectx.parents()[0].node():
+            ui.status(_('rebasing shelved changes\n'))
             try:
                 rebase.rebase(ui, repo, **{
                     'rev' : [shelvectx.rev()],
@@ -584,7 +595,7 b' def unshelve(ui, repo, *shelved, **opts)'
 
                 stripnodes = [repo.changelog.node(rev)
                               for rev in xrange(oldtiprev, len(repo))]
-                shelvedstate.save(repo, basename, wctx, tmpwctx, stripnodes)
+                shelvedstate.save(repo, basename, pctx, tmpwctx, stripnodes)
 
                 util.rename(repo.join('rebasestate'),
                             repo.join('unshelverebasestate'))
@@ -599,7 +610,7 b' def unshelve(ui, repo, *shelved, **opts)'
             # rebase was a no-op, so it produced no child commit
             shelvectx = tmpwctx
 
-        mergefiles(ui, repo, wctx, shelvectx)
+        mergefiles(ui, repo, pctx, shelvectx)
         shelvedstate.clear(repo)
 
         # The transaction aborting will strip all the commits for us,
@@ -610,6 +621,7 b' def unshelve(ui, repo, *shelved, **opts)'
 
         unshelvecleanup(ui, repo, basename, opts)
     finally:
+        ui.quiet = oldquiet
        if tr:
            tr.release()
        lockmod.release(lock, wlock)
@@ -632,8 +644,8 b' def unshelve(ui, repo, *shelved, **opts)'
           ('p', 'patch', None,
            _('show patch')),
           ('', 'stat', None,
-           _('output diffstat-style summary of changes'))],
-         _('hg shelve'))
+           _('output diffstat-style summary of changes'))] + commands.walkopts,
+         _('hg shelve [OPTION]... [FILE]...'))
 def shelvecmd(ui, repo, *pats, **opts):
     '''save and set aside changes from the working directory
 
@@ -568,8 +568,9 b' def transplant(ui, repo, *revs, **opts):'
         if not heads:
             heads = repo.heads()
         ancestors = []
+        ctx = repo[dest]
         for head in heads:
-            ancestors.append(repo.changelog.ancestor(dest, head))
+            ancestors.append(ctx.ancestor(repo[head]).node())
         for node in repo.changelog.nodesbetween(ancestors, heads)[0]:
             if match(node):
                 yield node
@@ -670,7 +671,8 b' def revsettransplanted(repo, subset, x):'
         s = revset.getset(repo, subset, x)
     else:
         s = subset
-    return [r for r in s if repo[r].extra().get('transplant_source')]
+    return revset.baseset([r for r in s if
+                           repo[r].extra().get('transplant_source')])
 
 def kwtransplanted(repo, ctx, **args):
     """:transplanted: String. The node identifier of the transplanted
@@ -5,7 +5,7 b''
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-'''perform automatic newline conversion
+'''perform automatic newline conversion (DEPRECATED)
 
 Deprecation: The win32text extension requires each user to configure
 the extension again and again for each clone since the configuration
@@ -66,6 +66,46 b' def promptchoice(pe):'
 def warningchecker(msgidpat=None):
     return checker('warning', msgidpat)
 
+@warningchecker()
+def taildoublecolons(pe):
+    """Check equality of tail '::'-ness between msgid and msgstr
+
+    >>> pe = polib.POEntry(
+    ...     msgid ='ends with ::',
+    ...     msgstr='ends with ::')
+    >>> for e in taildoublecolons(pe): print e
+    >>> pe = polib.POEntry(
+    ...     msgid ='ends with ::',
+    ...     msgstr='ends without double-colons')
+    >>> for e in taildoublecolons(pe): print e
+    tail '::'-ness differs between msgid and msgstr
+    >>> pe = polib.POEntry(
+    ...     msgid ='ends without double-colons',
+    ...     msgstr='ends with ::')
+    >>> for e in taildoublecolons(pe): print e
+    tail '::'-ness differs between msgid and msgstr
+    """
+    if pe.msgid.endswith('::') != pe.msgstr.endswith('::'):
+        yield "tail '::'-ness differs between msgid and msgstr"
+
+@warningchecker()
+def indentation(pe):
+    """Check equality of initial indentation between msgid and msgstr
+
+    This may report unexpected warnings, because it is not aware of
+    rst syntax or of the context of the msgstr.
+
+    >>> pe = polib.POEntry(
+    ...     msgid =' indented text',
+    ...     msgstr='  narrowed indentation')
+    >>> for e in indentation(pe): print e
+    initial indentation width differs between msgid and msgstr
+    """
+    idindent = len(pe.msgid) - len(pe.msgid.lstrip())
+    strindent = len(pe.msgstr) - len(pe.msgstr.lstrip())
+    if idindent != strindent:
+        yield "initial indentation width differs between msgid and msgstr"
+
 ####################
 
 def check(pofile, fatal=True, warning=False):
@@ -20,7 +20,7 b' msgid ""'
 msgstr ""
 "Project-Id-Version: Mercurial\n"
 "Report-Msgid-Bugs-To: <mercurial-devel@selenic.com>\n"
-"POT-Creation-Date: 2014-01-2
+"POT-Creation-Date: 2014-01-29 16:47+0100\n"
 "PO-Revision-Date: 2013-09-30 20:52+0100\n"
 "Last-Translator: Simon Heimberg <simohe@besonet.ch>\n"
 "Language-Team: \n"
@@ -2928,6 +2928,7 b' msgstr ""'
 " [repository]\n"
 " native = LF"
 
+#. do not translate: .. note::
 msgid ".. note::"
 msgstr ""
 
@@ -5029,6 +5030,7 b' msgstr ""'
 " Siehe Hilfe zu 'paths' zu Pfad-Kurznamen und 'urls' für erlaubte\n"
 " Formate für die Quellangabe."
 
+#. do not translate: .. container::
 msgid " .. container:: verbose"
 msgstr ""
 
@@ -6548,6 +6550,7 b' msgstr ""'
 " Ohne Argumente werden die aktuell aktiven Wächter ausgegeben.\n"
 " Mit einem Argument wird der aktuelle Wächter gesetzt."
 
+#. do not translate: .. note::
 msgid " .. note::"
 msgstr ""
 
@@ -15694,6 +15697,7 b' msgid ""'
 " order until one or more configuration files are detected."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: The registry key ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node"
 "\\Mercurial``\n"
@@ -15873,6 +15877,7 b' msgstr ""'
 msgid " stable5 = latest -b stable"
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: It is possible to create aliases with the same names as\n"
 " existing commands, which will then override the original\n"
@@ -15918,6 +15923,7 b' msgid ""'
 "echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: Some global configuration options such as ``-R`` are\n"
 " processed before shell aliases and will thus not be passed to\n"
@@ -16101,6 +16107,7 b' msgid ""'
 "the command."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: The tempfile mechanism is recommended for Windows systems,\n"
 " where the standard shell I/O redirection operators often have\n"
@@ -16572,6 +16579,7 b' msgid ""'
 " update failed (e.g. because conflicts not resolved), ``$HG_ERROR=1``."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: It is generally better to use standard hooks rather than the\n"
 " generic pre- and post- command hooks as they are guaranteed to be\n"
@@ -16580,6 +16588,7 b' msgid ""'
 " generate a commit (e.g. tag) and not just the commit command."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note:: Environment variables with empty values may not be passed to\n"
 " hooks on platforms such as Windows. As an example, ``$HG_PARENT2``\n"
@@ -18967,6 +18976,7 b' msgid ""'
 ":Manual group: Mercurial Manual"
 msgstr ""
 
+#. do not translate: .. contents::
 msgid ""
 ".. contents::\n"
 "   :backlinks: top\n"
@@ -19017,6 +19027,7 b' msgid ""'
 " repository."
 msgstr ""
 
+#. do not translate: .. include::
 msgid ".. include:: hg.1.gendoc.txt"
 msgstr ""
 
@@ -19121,6 +19132,7 b' msgid ""'
 "Public License version 2 or any later version."
 msgstr ""
 
+#. do not translate: .. include::
 msgid ".. include:: common.txt\n"
 msgstr ""
 
@@ -19143,6 +19155,7 b' msgid ""'
 ":Manual group: Mercurial Manual"
 msgstr ""
 
+#. do not translate: .. include::
 msgid ".. include:: hgignore.5.gendoc.txt"
 msgstr ""
 
@@ -19170,6 +19183,7 b' msgid ""'
 "Public License version 2 or any later version."
 msgstr ""
 
+#. do not translate: .. include::
 msgid ".. include:: common.txt"
 msgstr ""
 
@@ -19281,6 +19295,7 b' msgid ""'
 "regexp pattern, start it with ``^``."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "  Patterns specified in other than ``.hgignore`` are always rooted.\n"
@@ -19333,6 +19348,7 b' msgid ""'
 ":Manual group: Mercurial Manual"
 msgstr ""
 
+#. do not translate: .. contents::
 msgid ""
 ".. contents::\n"
 "   :backlinks: top\n"
@@ -19348,6 +19364,7 b' msgstr ""'
 "Beschreibung\n"
 "============"
 
+#. do not translate: .. include::
 msgid ".. include:: hgrc.5.gendoc.txt"
 msgstr ""
 
@@ -19564,6 +19581,7 b' msgstr ""'
 msgid "8. The merge of the file fails and must be resolved before commit."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "   After selecting a merge program, Mercurial will by default attempt\n"
@@ -19633,6 +19651,7 b' msgstr ""'
 msgid "Alternate pattern notations must be specified explicitly."
 msgstr "Andere Schreibweisen von Mustern müssen explizit angegeben werden."
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "  Patterns specified in ``.hgignore`` are not rooted.\n"
@@ -19804,6 +19823,7 b' msgstr ""'
 msgid " - secret changesets are neither pushed, pulled, or cloned"
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "   Pulling a draft changeset from a publishing server does not mark it\n"
@@ -19823,12 +19843,14 b' msgstr ""'
 "  [phases]\n"
 "  publish = False"
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "  Servers running older versions of Mercurial are treated as\n"
 "  publishing."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 ".. note::\n"
 "  Changesets in secret phase are not exchanged with the server. This\n"
@@ -20216,6 +20238,7 b' msgid ""'
 "  repositories states when committing in the parent repository."
 msgstr ""
 
+#. do not translate: .. note::
 msgid ""
 "   .. note::\n"
 "      The ``.hgsubstate`` file should not be edited manually."
@@ -5,6 +5,7 @@
 # license: MIT/X11/Expat
 #
 
+import re
 import sys
 import polib
 
@@ -30,6 +31,7 @@ if __name__ == "__main__":
     cache = {}
     entries = po[:]
     po[:] = []
+    findd = re.compile(r' *\.\. (\w+)::') # for finding directives
     for entry in entries:
         msgids = entry.msgid.split(u'\n\n')
         if entry.msgstr:
@@ -49,8 +51,27 @@ if __name__ == "__main__":
 
         delta = 0
         for msgid, msgstr in zip(msgids, msgstrs):
-            if msgid:
+            if msgid and msgid != '::':
                 newentry = mkentry(entry, delta, msgid, msgstr)
+                mdirective = findd.match(msgid)
+                if mdirective:
+                    if not msgid[mdirective.end():].rstrip():
+                        # only directive, nothing to translate here
+                        continue
+                    directive = mdirective.group(1)
+                    if directive in ('container', 'include'):
+                        if msgid.rstrip('\n').count('\n') == 0:
+                            # only rst syntax, nothing to translate
+                            continue
+                        else:
+                            # lines following directly, unexpected
+                            print 'Warning: text follows line with directive' \
+                                  ' %s' % directive
+                    comment = 'do not translate: .. %s::' % directive
+                    if not newentry.comment:
+                        newentry.comment = comment
+                    elif comment not in newentry.comment:
+                        newentry.comment += '\n' + comment
                 addentry(po, newentry, cache)
             delta += 2 + msgid.count('\n')
     po.save()
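The new pattern is easiest to see in isolation. A quick sketch, not part of the patch, of what `findd` does and does not match, in the same Python 2 the script uses:

    import re

    findd = re.compile(r' *\.\. (\w+)::')   # same pattern as in the patch

    m = findd.match('.. note::')
    print m.group(1)                        # note
    print repr('.. note::'[m.end():])      # '' -> directive-only paragraph

    m = findd.match('.. include:: hgrc.5.gendoc.txt')
    print m.group(1)                        # include

    print findd.match('note:: no leading dots')   # None

Directive-only paragraphs are dropped from the po file, while paragraphs that start with a directive but carry real text get the "do not translate" comment, which is exactly what the de.po hunks above show.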
@@ -9,6 +9,62 @@ import heapq
 import util
 from node import nullrev
 
+def commonancestorsheads(pfunc, *nodes):
+    """Returns a set with the heads of all common ancestors of all nodes,
+    heads(::nodes[0] and ::nodes[1] and ...) .
+
+    pfunc must return a list of parent vertices for a given vertex.
+    """
+    if not isinstance(nodes, set):
+        nodes = set(nodes)
+    if nullrev in nodes:
+        return set()
+    if len(nodes) <= 1:
+        return nodes
+
+    allseen = (1 << len(nodes)) - 1
+    seen = [0] * (max(nodes) + 1)
+    for i, n in enumerate(nodes):
+        seen[n] = 1 << i
+        poison = 1 << (i + 1)
+
+    gca = set()
+    interesting = len(nodes)
+    nv = len(seen) - 1
+    while nv >= 0 and interesting:
+        v = nv
+        nv -= 1
+        if not seen[v]:
+            continue
+        sv = seen[v]
+        if sv < poison:
+            interesting -= 1
+            if sv == allseen:
+                gca.add(v)
+                sv |= poison
+                if v in nodes:
+                    # history is linear
+                    return set([v])
+        if sv < poison:
+            for p in pfunc(v):
+                sp = seen[p]
+                if p == nullrev:
+                    continue
+                if sp == 0:
+                    seen[p] = sv
+                    interesting += 1
+                elif sp != sv:
+                    seen[p] |= sv
+        else:
+            for p in pfunc(v):
+                if p == nullrev:
+                    continue
+                sp = seen[p]
+                if sp and sp < poison:
+                    interesting -= 1
+                seen[p] = sv
+    return gca
+
 def ancestors(pfunc, *orignodes):
     """
     Returns the common ancestors of a and b that are furthest from a
@@ -16,59 +72,6 @@ def ancestors(pfunc, *orignodes):
 
     pfunc must return a list of parent vertices for a given vertex.
     """
-    if not isinstance(orignodes, set):
-        orignodes = set(orignodes)
-    if nullrev in orignodes:
-        return set()
-    if len(orignodes) <= 1:
-        return orignodes
-
-    def candidates(nodes):
-        allseen = (1 << len(nodes)) - 1
-        seen = [0] * (max(nodes) + 1)
-        for i, n in enumerate(nodes):
-            seen[n] = 1 << i
-            poison = 1 << (i + 1)
-
-        gca = set()
-        interesting = left = len(nodes)
-        nv = len(seen) - 1
-        while nv >= 0 and interesting:
-            v = nv
-            nv -= 1
-            if not seen[v]:
-                continue
-            sv = seen[v]
-            if sv < poison:
-                interesting -= 1
-                if sv == allseen:
-                    gca.add(v)
-                    sv |= poison
-                    if v in nodes:
-                        left -= 1
-                        if left <= 1:
-                            # history is linear
-                            return set([v])
-            if sv < poison:
-                for p in pfunc(v):
-                    sp = seen[p]
-                    if p == nullrev:
-                        continue
-                    if sp == 0:
-                        seen[p] = sv
-                        interesting += 1
-                    elif sp != sv:
-                        seen[p] |= sv
-            else:
-                for p in pfunc(v):
-                    if p == nullrev:
-                        continue
-                    sp = seen[p]
-                    if sp and sp < poison:
-                        interesting -= 1
-                    seen[p] = sv
-        return gca
-
     def deepest(nodes):
         interesting = {}
         count = max(nodes) + 1
@@ -125,95 +128,12 @@ def ancestors(pfunc, *orignodes):
             k |= i
         return set(n for (i, n) in mapping if k & i)
 
-    gca = candidates(orignodes)
+    gca = commonancestorsheads(pfunc, *orignodes)
 
     if len(gca) <= 1:
         return gca
     return deepest(gca)
 
-def genericancestor(a, b, pfunc):
-    """
-    Returns the common ancestor of a and b that is furthest from a
-    root (as measured by longest path) or None if no ancestor is
-    found. If there are multiple common ancestors at the same
-    distance, the first one found is returned.
-
-    pfunc must return a list of parent vertices for a given vertex
-    """
-
-    if a == b:
-        return a
-
-    a, b = sorted([a, b])
-
-    # find depth from root of all ancestors
-    # depth is stored as a negative for heapq
-    parentcache = {}
-    visit = [a, b]
-    depth = {}
-    while visit:
-        vertex = visit[-1]
-        pl = [p for p in pfunc(vertex) if p != nullrev]
-        parentcache[vertex] = pl
-        if not pl:
-            depth[vertex] = 0
-            visit.pop()
-        else:
-            for p in pl:
-                if p == a or p == b: # did we find a or b as a parent?
-                    return p # we're done
-                if p not in depth:
-                    visit.append(p)
-            if visit[-1] == vertex:
-                # -(maximum distance of parents + 1)
-                depth[vertex] = min([depth[p] for p in pl]) - 1
-                visit.pop()
-
-    # traverse ancestors in order of decreasing distance from root
-    def ancestors(vertex):
-        h = [(depth[vertex], vertex)]
-        seen = set()
-        while h:
-            d, n = heapq.heappop(h)
-            if n not in seen:
-                seen.add(n)
-                yield (d, n)
-                for p in parentcache[n]:
-                    heapq.heappush(h, (depth[p], p))
-
-    def generations(vertex):
-        sg, s = None, set()
-        for g, v in ancestors(vertex):
-            if g != sg:
-                if sg:
-                    yield sg, s
-                sg, s = g, set((v,))
-            else:
-                s.add(v)
-        yield sg, s
-
-    x = generations(a)
-    y = generations(b)
-    gx = x.next()
-    gy = y.next()
-
-    # increment each ancestor list until it is closer to root than
-    # the other, or they match
-    try:
-        while True:
-            if gx[0] == gy[0]:
-                for v in gx[1]:
-                    if v in gy[1]:
-                        return v
-                gy = y.next()
-                gx = x.next()
-            elif gx[0] > gy[0]:
-                gy = y.next()
-            else:
-                gx = x.next()
-    except StopIteration:
-        return None
-
 def missingancestors(revs, bases, pfunc):
     """Return all the ancestors of revs that are not ancestors of bases.
 
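To make the extracted bit-vector walk concrete, here is a toy run, not from the patch, against a criss-cross DAG; it assumes the function is reachable as mercurial.ancestor.commonancestorsheads and that pfunc maps a revision number to its parent list (-1 is nullrev):

    from mercurial import ancestor

    # 0 is the root; 1 and 2 branch off it; 3 and 4 both merge 1 and 2
    parents = {0: [-1], 1: [0], 2: [0], 3: [1, 2], 4: [1, 2]}

    print ancestor.commonancestorsheads(parents.__getitem__, 3, 4)
    # set([1, 2]) -- a criss-cross merge has two common-ancestor heads,
    # the case a single "greatest common ancestor" API cannot express

Each input node gets one bit; bits propagate down to parents, a vertex carrying all bits is a common-ancestor head, and the extra "poison" bit stops its own ancestors from being reported as heads too.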
@@ -363,22 +363,6 @@ def updatefromremote(ui, repo, remotemar
             writer(msg)
         localmarks.write()
 
-def updateremote(ui, repo, remote, revs):
-    ui.debug("checking for updated bookmarks\n")
-    revnums = map(repo.changelog.rev, revs or [])
-    ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
-    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
-     ) = compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
-                 srchex=hex)
-
-    for b, scid, dcid in advsrc:
-        if ancestors and repo[scid].rev() not in ancestors:
-            continue
-        if remote.pushkey('bookmarks', b, dcid, scid):
-            ui.status(_("updating bookmark %s\n") % b)
-        else:
-            ui.warn(_('updating bookmark %s failed!\n') % b)
-
 def pushtoremote(ui, repo, remote, targets):
     (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
      ) = compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
@@ -8,6 +8,7 @@
 from node import bin, hex, nullid, nullrev
 import encoding
 import util
+import time
 
 def _filename(repo):
     """name of a branchcache file for a given repo or repoview"""
@@ -206,8 +207,10 @@ class branchcache(dict):
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
             f.write(" ".join(cachekey) + '\n')
+            nodecount = 0
             for label, nodes in sorted(self.iteritems()):
                 for node in nodes:
+                    nodecount += 1
                     if node in self._closednodes:
                         state = 'c'
                     else:
@@ -215,6 +218,9 @@ class branchcache(dict):
                     f.write("%s %s %s\n" % (hex(node), state,
                                             encoding.fromlocal(label)))
             f.close()
+            repo.ui.log('branchcache',
+                        'wrote %s branch cache with %d labels and %d nodes\n',
+                        repo.filtername, len(self), nodecount)
         except (IOError, OSError, util.Abort):
             # Abort may be raise by read only opener
             pass
@@ -224,6 +230,7 @@ class branchcache(dict):
         missing heads, and a generator of nodes that are strictly a superset of
         heads missing, this function updates self to be correct.
        """
+        starttime = time.time()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
@@ -272,3 +279,7 @@ class branchcache(dict):
             self.tipnode = cl.node(tiprev)
             self.tiprev = tiprev
             self.filteredhash = self._hashfiltered(repo)
+
+        duration = time.time() - starttime
+        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
+                    repo.filtername, duration)
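The instrumentation pattern here is generic: take a timestamp, do the work, and hand a format string plus arguments to ui.log so handlers such as the blackbox extension can pick them up by topic. A rough helper sketch; the logged() wrapper and its use are hypothetical, not part of the patch:

    import contextlib, time

    @contextlib.contextmanager
    def logged(ui, topic, fmt, *args):
        # times the with-block and appends the duration as the final
        # format argument, mirroring the hand-rolled code above
        start = time.time()
        yield
        ui.log(topic, fmt, *(args + (time.time() - start,)))

    # e.g., assuming `repo` is in scope:
    #   with logged(repo.ui, 'branchcache',
    #               'updated %s branch cache in %.4f seconds\n',
    #               repo.filtername):
    #       ...the update work...

Note the sketch skips try/finally, so an exception in the block suppresses the log line; the patch's inline version behaves the same way.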
@@ -14,7 +14,7 @@ were part of the actual repository.
 from node import nullid
 from i18n import _
 import os, tempfile, shutil
-import changegroup, util, mdiff, discovery, cmdutil, scmutil
+import changegroup, util, mdiff, discovery, cmdutil, scmutil, exchange
 import localrepo, changelog, manifest, filelog, revlog, error
 
 class bundlerevlog(revlog.revlog):
@@ -193,7 +193,7 @@ class bundlerepository(localrepo.localre
             self._tempparent = tempfile.mkdtemp()
             localrepo.instance(ui, self._tempparent, 1)
             localrepo.localrepository.__init__(self, ui, self._tempparent)
-        self.ui.setconfig('phases', 'publish', False)
+        self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
 
         if path:
             self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
@@ -202,10 +202,10 @@ class bundlerepository(localrepo.localre
 
         self.tempfile = None
         f = util.posixfile(bundlename, "rb")
-        self.bundle = changegroup.readbundle(f, bundlename)
+        self.bundle = exchange.readbundle(ui, f, bundlename)
         if self.bundle.compressed():
-            fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
-                                            suffix=".hg10un", dir=self.path)
+            fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
+                                            suffix=".hg10un")
             self.tempfile = temp
             fptemp = os.fdopen(fdtemp, 'wb')
 
@@ -219,8 +219,8 @@ class bundlerepository(localrepo.localre
             finally:
                 fptemp.close()
 
-            f = util.posixfile(self.tempfile, mode="rb")
-            self.bundle = changegroup.readbundle(f, bundlename)
+            f = self.vfs.open(self.tempfile, mode="rb")
+            self.bundle = exchange.readbundle(ui, f, bundlename, self.vfs)
 
         # dict with the mapping 'filename' -> position in the bundle
         self.bundlefilespos = {}
@@ -280,7 +280,7 @@ class bundlerepository(localrepo.localre
         """Close assigned bundle file immediately."""
         self.bundle.close()
         if self.tempfile is not None:
-            os.unlink(self.tempfile)
+            self.vfs.unlink(self.tempfile)
         if self._tempparent:
             shutil.rmtree(self._tempparent, True)
 
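Not part of the change itself: the shape of the vfs calls it switches to. A vfs is rooted at a directory (for a repository, typically .hg/), so the temp file name it hands back is relative and can be reopened and unlinked through the same object. A sketch, assuming `repo` is a local repository:

    import os

    vfs = repo.vfs                       # rooted opener
    fd, tmp = vfs.mkstemp(prefix="hg-bundle-", suffix=".hg10un")
    os.close(fd)
    try:
        f = vfs.open(tmp, mode="rb")     # `tmp` is relative to the vfs root
        f.close()
    finally:
        vfs.unlink(tmp)

This is why the dir=self.path argument disappears above: the vfs already carries the base directory.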
@@ -5,10 +5,12 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
+import weakref
 from i18n import _
-from node import nullrev, hex
+from node import nullrev, nullid, hex, short
 import mdiff, util, dagutil
 import struct, os, bz2, zlib, tempfile
+import discovery, error, phases, branchmap
 
 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
 
@@ -57,7 +59,7 @@ bundletypes = {
 # hgweb uses this list to communicate its preferred type
 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
 
-def writebundle(cg, filename, bundletype):
+def writebundle(cg, filename, bundletype, vfs=None):
     """Write a bundle file and return its filename.
 
     Existing files will not be overwritten.
@@ -70,7 +72,10 @@ def writebundle(cg, filename, bundletype
     cleanup = None
     try:
         if filename:
-            fh = open(filename, "wb")
+            if vfs:
+                fh = vfs.open(filename, "wb")
+            else:
+                fh = open(filename, "wb")
         else:
             fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
             fh = os.fdopen(fd, "wb")
@@ -86,23 +91,8 @@ def writebundle(cg, filename, bundletype
         # an empty chunkgroup is the end of the changegroup
         # a changegroup has at least 2 chunkgroups (changelog and manifest).
         # after that, an empty chunkgroup is the end of the changegroup
-        empty = False
-        count = 0
-        while not empty or count <= 2:
-            empty = True
-            count += 1
-            while True:
-                chunk = getchunk(cg)
-                if not chunk:
-                    break
-                empty = False
-                fh.write(z.compress(chunkheader(len(chunk))))
-                pos = 0
-                while pos < len(chunk):
-                    next = pos + 2**20
-                    fh.write(z.compress(chunk[pos:next]))
-                    pos = next
-        fh.write(z.compress(closechunk()))
+        for chunk in cg.getchunks():
+            fh.write(z.compress(chunk))
         fh.write(z.flush())
         cleanup = None
         return filename
@@ -110,7 +100,10 @@ def writebundle(cg, filename, bundletype
         if fh is not None:
             fh.close()
         if cleanup is not None:
-            os.unlink(cleanup)
+            if filename and vfs:
+                vfs.unlink(cleanup)
+            else:
+                os.unlink(cleanup)
 
 def decompressor(fh, alg):
     if alg == 'UN':
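The new vfs parameter keeps bundle writes inside a rooted opener instead of touching absolute paths. A hypothetical call, with names assumed rather than taken from the patch:

    # assuming `cg` is a changegroup object and `repo` is in scope
    fname = writebundle(cg, 'last-push.hg', 'HG10BZ', vfs=repo.opener)
    # with vfs=None the filename is treated as a plain filesystem path,
    # which is what every existing caller still gets

The cleanup path mirrors the open path: if the file was created through the vfs, it is also unlinked through it.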
@@ -173,7 +166,7 @@ class unbundle10(object):
         if not l:
             return {}
         fname = readexactly(self._stream, l)
-        return dict(filename=fname)
+        return {'filename': fname}
 
     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, cs = headertuple
@@ -191,8 +184,36 @@ class unbundle10(object):
         header = struct.unpack(self.deltaheader, headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
         node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
-        return dict(node=node, p1=p1, p2=p2, cs=cs,
-                    deltabase=deltabase, delta=delta)
+        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
+                'deltabase': deltabase, 'delta': delta}
+
+    def getchunks(self):
+        """returns all the chunks contains in the bundle
+
+        Used when you need to forward the binary stream to a file or another
+        network API. To do so, it parse the changegroup data, otherwise it will
+        block in case of sshrepo because it don't know the end of the stream.
+        """
+        # an empty chunkgroup is the end of the changegroup
+        # a changegroup has at least 2 chunkgroups (changelog and manifest).
+        # after that, an empty chunkgroup is the end of the changegroup
+        empty = False
+        count = 0
+        while not empty or count <= 2:
+            empty = True
+            count += 1
+            while True:
+                chunk = getchunk(self)
+                if not chunk:
+                    break
+                empty = False
+                yield chunkheader(len(chunk))
+                pos = 0
+                while pos < len(chunk):
+                    next = pos + 2**20
+                    yield chunk[pos:next]
+                    pos = next
+        yield closechunk()
 
 class headerlessfixup(object):
     def __init__(self, fh, h):
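For reference, the framing getchunks() re-emits is the one getchunk()/chunkheader() already implement: a 4-byte big-endian length that includes itself, with a length of 4 or less closing a chunk group. A minimal stand-alone reader sketch, not from the patch:

    import struct

    def iterchunks(fh):
        # yields chunk payloads; an empty string marks a group boundary
        while True:
            header = fh.read(4)
            if len(header) < 4:
                return                    # end of stream
            l = struct.unpack(">l", header)[0]
            if l <= 4:
                yield ''                  # closechunk(): group delimiter
            else:
                yield fh.read(l - 4)

Because the generator parses the framing as it goes, a consumer such as writebundle() can stream from an ssh peer without knowing the total length up front, which is the point made in the docstring above.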
@@ -206,23 +227,6 @@ class headerlessfixup(object):
             return d
         return readexactly(self._fh, n)
 
-def readbundle(fh, fname):
-    header = readexactly(fh, 6)
-
-    if not fname:
-        fname = "stream"
-    if not header.startswith('HG') and header.startswith('\0'):
-        fh = headerlessfixup(fh, header)
-        header = "HG10UN"
-
-    magic, version, alg = header[0:2], header[2:4], header[4:6]
-
-    if magic != 'HG':
-        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
-    if version != '10':
-        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
-    return unbundle10(fh, alg)
-
 class bundle10(object):
     deltaheader = _BUNDLE10_DELTA_HEADER
     def __init__(self, repo, bundlecaps=None):
@@ -428,3 +432,310 @@ class bundle10(object):
     def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
         # do nothing with basenode, it is implicitly the previous one in HG10
         return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
+
+def _changegroupinfo(repo, nodes, source):
+    if repo.ui.verbose or source == 'bundle':
+        repo.ui.status(_("%d changesets found\n") % len(nodes))
+    if repo.ui.debugflag:
+        repo.ui.debug("list of changesets:\n")
+        for node in nodes:
+            repo.ui.debug("%s\n" % hex(node))
+
+def getsubset(repo, outgoing, bundler, source, fastpath=False):
+    repo = repo.unfiltered()
+    commonrevs = outgoing.common
+    csets = outgoing.missing
+    heads = outgoing.missingheads
+    # We go through the fast path if we get told to, or if all (unfiltered
+    # heads have been requested (since we then know there all linkrevs will
+    # be pulled by the client).
+    heads.sort()
+    fastpathlinkrev = fastpath or (
+        repo.filtername is None and heads == sorted(repo.heads()))
+
+    repo.hook('preoutgoing', throw=True, source=source)
+    _changegroupinfo(repo, csets, source)
+    gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
+    return unbundle10(util.chunkbuffer(gengroup), 'UN')
+
+def changegroupsubset(repo, roots, heads, source):
+    """Compute a changegroup consisting of all the nodes that are
+    descendants of any of the roots and ancestors of any of the heads.
+    Return a chunkbuffer object whose read() method will return
+    successive changegroup chunks.
+
+    It is fairly complex as determining which filenodes and which
+    manifest nodes need to be included for the changeset to be complete
+    is non-trivial.
+
+    Another wrinkle is doing the reverse, figuring out which changeset in
+    the changegroup a particular filenode or manifestnode belongs to.
+    """
+    cl = repo.changelog
+    if not roots:
+        roots = [nullid]
+    # TODO: remove call to nodesbetween.
+    csets, roots, heads = cl.nodesbetween(roots, heads)
+    discbases = []
+    for n in roots:
+        discbases.extend([p for p in cl.parents(n) if p != nullid])
+    outgoing = discovery.outgoing(cl, discbases, heads)
+    bundler = bundle10(repo)
+    return getsubset(repo, outgoing, bundler, source)
+
+def getlocalbundle(repo, source, outgoing, bundlecaps=None):
+    """Like getbundle, but taking a discovery.outgoing as an argument.
+
+    This is only implemented for local repos and reuses potentially
+    precomputed sets in outgoing."""
+    if not outgoing.missing:
+        return None
+    bundler = bundle10(repo, bundlecaps)
+    return getsubset(repo, outgoing, bundler, source)
+
+def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
+    """Like changegroupsubset, but returns the set difference between the
+    ancestors of heads and the ancestors common.
+
+    If heads is None, use the local heads. If common is None, use [nullid].
+
+    The nodes in common might not all be known locally due to the way the
+    current discovery protocol works.
+    """
+    cl = repo.changelog
+    if common:
+        hasnode = cl.hasnode
+        common = [n for n in common if hasnode(n)]
+    else:
+        common = [nullid]
+    if not heads:
+        heads = cl.heads()
+    outgoing = discovery.outgoing(cl, common, heads)
+    return getlocalbundle(repo, source, outgoing, bundlecaps=bundlecaps)
+
+def changegroup(repo, basenodes, source):
+    # to avoid a race we use changegroupsubset() (issue1320)
+    return changegroupsubset(repo, basenodes, repo.heads(), source)
+
+def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
+    revisions = 0
+    files = 0
+    while True:
+        chunkdata = source.filelogheader()
+        if not chunkdata:
+            break
+        f = chunkdata["filename"]
+        repo.ui.debug("adding %s revisions\n" % f)
+        pr()
+        fl = repo.file(f)
+        o = len(fl)
+        if not fl.addgroup(source, revmap, trp):
+            raise util.Abort(_("received file revlog group is empty"))
+        revisions += len(fl) - o
+        files += 1
+        if f in needfiles:
+            needs = needfiles[f]
+            for new in xrange(o, len(fl)):
+                n = fl.node(new)
+                if n in needs:
+                    needs.remove(n)
+                else:
+                    raise util.Abort(
+                        _("received spurious file revlog entry"))
+            if not needs:
+                del needfiles[f]
+    repo.ui.progress(_('files'), None)
+
+    for f, needs in needfiles.iteritems():
+        fl = repo.file(f)
+        for n in needs:
+            try:
+                fl.rev(n)
+            except error.LookupError:
+                raise util.Abort(
+                    _('missing file data for %s:%s - run hg verify') %
+                    (f, hex(n)))
+
+    return revisions, files
+
+def addchangegroup(repo, source, srctype, url, emptyok=False):
+    """Add the changegroup returned by source.read() to this repo.
+    srctype is a string like 'push', 'pull', or 'unbundle'. url is
+    the URL of the repo where this changegroup is coming from.
+
+    Return an integer summarizing the change to this repo:
+    - nothing changed or no source: 0
+    - more heads than before: 1+added heads (2..n)
+    - fewer heads than before: -1-removed heads (-2..-n)
+    - number of heads stays the same: 1
+    """
+    repo = repo.unfiltered()
+    def csmap(x):
+        repo.ui.debug("add changeset %s\n" % short(x))
+        return len(cl)
+
+    def revmap(x):
+        return cl.rev(x)
+
+    if not source:
+        return 0
+
+    repo.hook('prechangegroup', throw=True, source=srctype, url=url)
+
+    changesets = files = revisions = 0
+    efiles = set()
+
+    # write changelog data to temp files so concurrent readers will not see
+    # inconsistent view
+    cl = repo.changelog
+    cl.delayupdate()
+    oldheads = cl.heads()
+
+    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
+    try:
+        trp = weakref.proxy(tr)
+        # pull off the changeset group
+        repo.ui.status(_("adding changesets\n"))
+        clstart = len(cl)
+        class prog(object):
+            step = _('changesets')
+            count = 1
+            ui = repo.ui
+            total = None
+            def __call__(repo):
+                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
+                                 total=repo.total)
+                repo.count += 1
+        pr = prog()
+        source.callback = pr
+
+        source.changelogheader()
+        srccontent = cl.addgroup(source, csmap, trp)
+        if not (srccontent or emptyok):
+            raise util.Abort(_("received changelog group is empty"))
+        clend = len(cl)
+        changesets = clend - clstart
+        for c in xrange(clstart, clend):
+            efiles.update(repo[c].files())
+        efiles = len(efiles)
+        repo.ui.progress(_('changesets'), None)
+
+        # pull off the manifest group
+        repo.ui.status(_("adding manifests\n"))
+        pr.step = _('manifests')
+        pr.count = 1
+        pr.total = changesets # manifests <= changesets
+        # no need to check for empty manifest group here:
+        # if the result of the merge of 1 and 2 is the same in 3 and 4,
+        # no new manifest will be created and the manifest group will
+        # be empty during the pull
+        source.manifestheader()
+        repo.manifest.addgroup(source, revmap, trp)
+        repo.ui.progress(_('manifests'), None)
+
+        needfiles = {}
+        if repo.ui.configbool('server', 'validate', default=False):
+            # validate incoming csets have their manifests
+            for cset in xrange(clstart, clend):
+                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
+                mfest = repo.manifest.readdelta(mfest)
+                # store file nodes we must see
+                for f, n in mfest.iteritems():
+                    needfiles.setdefault(f, set()).add(n)
+
+        # process the files
+        repo.ui.status(_("adding file changes\n"))
+        pr.step = _('files')
+        pr.count = 1
+        pr.total = efiles
+        source.callback = None
+
+        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
+                                                needfiles)
+        revisions += newrevs
+        files += newfiles
+
+        dh = 0
+        if oldheads:
+            heads = cl.heads()
+            dh = len(heads) - len(oldheads)
+            for h in heads:
+                if h not in oldheads and repo[h].closesbranch():
+                    dh -= 1
+        htext = ""
+        if dh:
+            htext = _(" (%+d heads)") % dh
+
+        repo.ui.status(_("added %d changesets"
+                         " with %d changes to %d files%s\n")
+                         % (changesets, revisions, files, htext))
+        repo.invalidatevolatilesets()
+
+        if changesets > 0:
+            p = lambda: cl.writepending() and repo.root or ""
+            if 'node' not in tr.hookargs:
+                tr.hookargs['node'] = hex(cl.node(clstart))
+            repo.hook('pretxnchangegroup', throw=True, source=srctype,
+                      url=url, pending=p, **tr.hookargs)
+
+        added = [cl.node(r) for r in xrange(clstart, clend)]
+        publishing = repo.ui.configbool('phases', 'publish', True)
+        if srctype in ('push', 'serve'):
+            # Old servers can not push the boundary themselves.
+            # New servers won't push the boundary if changeset already
+            # exists locally as secret
+            #
+            # We should not use added here but the list of all change in
+            # the bundle
+            if publishing:
+                phases.advanceboundary(repo, phases.public, srccontent)
+            else:
+                phases.advanceboundary(repo, phases.draft, srccontent)
+                phases.retractboundary(repo, phases.draft, added)
+        elif srctype != 'strip':
+            # publishing only alter behavior during push
+            #
+            # strip should not touch boundary at all
+            phases.retractboundary(repo, phases.draft, added)
+
+        # make changelog see real files again
+        cl.finalize(trp)
+
+        tr.close()
+
+        if changesets > 0:
+            if srctype != 'strip':
+                # During strip, branchcache is invalid but coming call to
+                # `destroyed` will repair it.
+                # In other case we can safely update cache on disk.
+                branchmap.updatecache(repo.filtered('served'))
+            def runhooks():
+                # These hooks run when the lock releases, not when the
+                # transaction closes. So it's possible for the changelog
+                # to have changed since we last saw it.
+                if clstart >= len(repo):
+                    return
+
+                # forcefully update the on-disk branch cache
+                repo.ui.debug("updating the branch cache\n")
+                repo.hook("changegroup", source=srctype, url=url,
+                          **tr.hookargs)
+
+                for n in added:
+                    repo.hook("incoming", node=hex(n), source=srctype,
+                              url=url)
+
+                newheads = [h for h in repo.heads() if h not in oldheads]
+                repo.ui.log("incoming",
+                            "%s incoming changes - new heads: %s\n",
+                            len(added),
+                            ', '.join([hex(c[:6]) for c in newheads]))
+            repo._afterlock(runhooks)
+
+    finally:
+        tr.release()
+    # never return 0 here:
+    if dh < 0:
+        return dh - 1
+    else:
+        return dh + 1
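Callers decode the integer exactly as the docstring describes. A sketch, with the surrounding variable names assumed, of how a pull-style caller might report it:

    modheads = addchangegroup(repo, gen, 'pull', url)
    if modheads == 0:
        ui.status('nothing changed\n')
    elif modheads == 1:
        ui.status('head count unchanged\n')
    elif modheads > 1:
        ui.status('%d new heads\n' % (modheads - 1))
    else:
        ui.status('%d heads removed\n' % (-modheads - 1))

The "never return 0 here" comment is the key invariant: once a transaction ran, the result is biased away from zero so that zero unambiguously means "no source".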
@@ -10,7 +10,7 @@ from i18n import _
 import os, sys, errno, re, tempfile
 import util, scmutil, templater, patch, error, templatekw, revlog, copies
 import match as matchmod
-import subrepo, context, repair, graphmod, revset, phases, obsolete, pathutil
+import context, repair, graphmod, revset, phases, obsolete, pathutil
 import changelog
 import bookmarks
 import lock as lockmod
@@ -223,7 +223,7 @@ def openrevlog(repo, cmd, file_, opts):
     r = None
     if repo:
         if cl:
-            r = repo.changelog
+            r = repo.unfiltered().changelog
         elif mf:
             r = repo.manifest
         elif file_:
@@ -542,6 +542,131 @@ def service(opts, parentfn=None, initfn=
     if runfn:
         return runfn()
 
+def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
+    """Utility function used by commands.import to import a single patch
+
+    This function is explicitly defined here to help the evolve extension to
+    wrap this part of the import logic.
+
+    The API is currently a bit ugly because it a simple code translation from
+    the import command. Feel free to make it better.
+
+    :hunk: a patch (as a binary string)
+    :parents: nodes that will be parent of the created commit
+    :opts: the full dict of option passed to the import command
+    :msgs: list to save commit message to.
+           (used in case we need to save it when failing)
+    :updatefunc: a function that update a repo to a given node
+                 updatefunc(<repo>, <node>)
+    """
+    tmpname, message, user, date, branch, nodeid, p1, p2 = \
+        patch.extract(ui, hunk)
+
+    editor = commiteditor
+    if opts.get('edit'):
+        editor = commitforceeditor
+    update = not opts.get('bypass')
+    strip = opts["strip"]
+    sim = float(opts.get('similarity') or 0)
+    if not tmpname:
+        return (None, None)
+    msg = _('applied to working directory')
+
+    try:
+        cmdline_message = logmessage(ui, opts)
+        if cmdline_message:
+            # pickup the cmdline msg
+            message = cmdline_message
+        elif message:
+            # pickup the patch msg
+            message = message.strip()
+        else:
+            # launch the editor
+            message = None
+        ui.debug('message:\n%s\n' % message)
+
+        if len(parents) == 1:
+            parents.append(repo[nullid])
+        if opts.get('exact'):
+            if not nodeid or not p1:
+                raise util.Abort(_('not a Mercurial patch'))
+            p1 = repo[p1]
+            p2 = repo[p2 or nullid]
+        elif p2:
+            try:
+                p1 = repo[p1]
+                p2 = repo[p2]
+                # Without any options, consider p2 only if the
+                # patch is being applied on top of the recorded
+                # first parent.
+                if p1 != parents[0]:
+                    p1 = parents[0]
+                    p2 = repo[nullid]
+            except error.RepoError:
+                p1, p2 = parents
+        else:
+            p1, p2 = parents
+
+        n = None
+        if update:
+            if p1 != parents[0]:
+                updatefunc(repo, p1.node())
+            if p2 != parents[1]:
+                repo.setparents(p1.node(), p2.node())
+
+            if opts.get('exact') or opts.get('import_branch'):
+                repo.dirstate.setbranch(branch or 'default')
+
+            files = set()
+            patch.patch(ui, repo, tmpname, strip=strip, files=files,
+                        eolmode=None, similarity=sim / 100.0)
+            files = list(files)
+            if opts.get('no_commit'):
+                if message:
+                    msgs.append(message)
+            else:
+                if opts.get('exact') or p2:
+                    # If you got here, you either use --force and know what
+                    # you are doing or used --exact or a merge patch while
+                    # being updated to its first parent.
+                    m = None
+                else:
+                    m = scmutil.matchfiles(repo, files or [])
+                n = repo.commit(message, opts.get('user') or user,
+                                opts.get('date') or date, match=m,
+                                editor=editor)
+        else:
+            if opts.get('exact') or opts.get('import_branch'):
+                branch = branch or 'default'
+            else:
+                branch = p1.branch()
+            store = patch.filestore()
+            try:
+                files = set()
+                try:
+                    patch.patchrepo(ui, repo, p1, store, tmpname, strip,
+                                    files, eolmode=None)
+                except patch.PatchError, e:
+                    raise util.Abort(str(e))
+                memctx = context.makememctx(repo, (p1.node(), p2.node()),
+                                            message,
+                                            opts.get('user') or user,
+                                            opts.get('date') or date,
+                                            branch, files, store,
+                                            editor=commiteditor)
+                repo.savecommitmessage(memctx.description())
+                n = memctx.commit()
+            finally:
+                store.close()
+        if opts.get('exact') and hex(n) != nodeid:
+            raise util.Abort(_('patch is damaged or loses information'))
+        if n:
+            # i18n: refers to a short changeset id
+            msg = _('created %s') % short(n)
+        return (msg, n)
+    finally:
+        os.unlink(tmpname)
+
 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
            opts=None):
     '''export changesets as hg patches.'''
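commands.import drives this helper once per patch in its queue. The caller-side shape below is an assumption for illustration (the `hunk` variable and the bare hg.update wrapper are hypothetical; opts must carry the import command's options, including 'strip'):

    msgs = []

    def updatefunc(repo, node):
        # plain working-copy update, as a non-rebasing `hg import` wants;
        # assumes mercurial.hg is imported as hg
        hg.update(repo, node)

    msg, node = tryimportone(ui, repo, hunk, repo.parents(), opts,
                             msgs, updatefunc)
    if msg:
        ui.note(msg + '\n')

Returning (msg, node) rather than printing directly is what lets the evolve extension wrap a single patch application, per the docstring.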
@@ -629,7 +754,7 @@ def diffordiffstat(ui, repo, diffopts, n
     if listsubrepos:
         ctx1 = repo[node1]
         ctx2 = repo[node2]
-        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
+        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
             tempnode2 = node2
             try:
                 if node2 is not None:
@@ -823,7 +948,7 @@ class changeset_printer(object):
 class changeset_templater(changeset_printer):
     '''format changeset information.'''
 
-    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
+    def __init__(self, ui, repo, patch, diffopts, tmpl, mapfile, buffered):
         changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
         formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
         defaulttempl = {
@@ -836,11 +961,10 @@ class changeset_templater(changeset_prin
         defaulttempl['filecopy'] = defaulttempl['file_copy']
         self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                      cache=defaulttempl)
-        self.cache = {}
+        if tmpl:
+            self.t.cache['changeset'] = tmpl
 
-    def use_template(self, t):
-        '''set template string to use'''
-        self.t.cache['changeset'] = t
+        self.cache = {}
 
     def _meaningful_parentrevs(self, ctx):
         """Return list of meaningful (or all if debug) parentrevs for rev.
@@ -922,6 +1046,66 @@ class changeset_templater(changeset_prin
         except SyntaxError, inst:
             raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
 
+def gettemplate(ui, tmpl, style):
+    """
+    Find the template matching the given template spec or style.
+    """
+
+    # ui settings
+    if not tmpl and not style:
+        tmpl = ui.config('ui', 'logtemplate')
+        if tmpl:
+            try:
+                tmpl = templater.parsestring(tmpl)
+            except SyntaxError:
+                tmpl = templater.parsestring(tmpl, quoted=False)
+            return tmpl, None
+        else:
+            style = util.expandpath(ui.config('ui', 'style', ''))
+
+    if style:
+        mapfile = style
+        if not os.path.split(mapfile)[0]:
+            mapname = (templater.templatepath('map-cmdline.' + mapfile)
+                       or templater.templatepath(mapfile))
+            if mapname:
+                mapfile = mapname
+        return None, mapfile
+
+    if not tmpl:
+        return None, None
+
+    # looks like a literal template?
+    if '{' in tmpl:
+        return tmpl, None
+
+    # perhaps a stock style?
+    if not os.path.split(tmpl)[0]:
+        mapname = (templater.templatepath('map-cmdline.' + tmpl)
+                   or templater.templatepath(tmpl))
+        if mapname and os.path.isfile(mapname):
+            return None, mapname
+
+    # perhaps it's a reference to [templates]
+    t = ui.config('templates', tmpl)
+    if t:
+        try:
+            tmpl = templater.parsestring(t)
+        except SyntaxError:
+            tmpl = templater.parsestring(t, quoted=False)
+        return tmpl, None
+
+    # perhaps it's a path to a map or a template
+    if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
+        # is it a mapfile for a style?
+        if os.path.basename(tmpl).startswith("map-"):
+            return None, os.path.realpath(tmpl)
+        tmpl = open(tmpl).read()
+        return tmpl, None
+
+    # constant string?
+    return tmpl, None
+
 def show_changeset(ui, repo, opts, buffered=False):
     """show one changeset using template or regular display.
 
@@ -938,42 +1122,30 @@ def show_changeset(ui, repo, opts, buffe
     if opts.get('patch') or opts.get('stat'):
         patch = scmutil.matchall(repo)
 
-    tmpl = opts.get('template')
-    style = None
-    if not tmpl:
-        style = opts.get('style')
+    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
 
-    # ui settings
-    if not (tmpl or style):
-        tmpl = ui.config('ui', 'logtemplate')
-        if tmpl:
-            try:
-                tmpl = templater.parsestring(tmpl)
-            except SyntaxError:
-                tmpl = templater.parsestring(tmpl, quoted=False)
-        else:
-            style = util.expandpath(ui.config('ui', 'style', ''))
-
-    if not (tmpl or style):
+    if not tmpl and not mapfile:
         return changeset_printer(ui, repo, patch, opts, buffered)
 
-    mapfile = None
-    if style and not tmpl:
-        mapfile = style
-        if not os.path.split(mapfile)[0]:
-            mapname = (templater.templatepath('map-cmdline.' + mapfile)
-                       or templater.templatepath(mapfile))
-            if mapname:
-                mapfile = mapname
-
     try:
-        t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
+        t = changeset_templater(ui, repo, patch, opts, tmpl, mapfile, buffered)
     except SyntaxError, inst:
         raise util.Abort(inst.args[0])
-    if tmpl:
-        t.use_template(tmpl)
     return t
 
+def showmarker(ui, marker):
+    """utility function to display obsolescence marker in a readable way
+
+    To be used by debug function."""
+    ui.write(hex(marker.precnode()))
+    for repl in marker.succnodes():
+        ui.write(' ')
+        ui.write(hex(repl))
+    ui.write(' %X ' % marker._data[2])
+    ui.write('{%s}' % (', '.join('%r: %r' % t for t in
+                                 sorted(marker.metadata().items()))))
+    ui.write('\n')
+
 def finddate(ui, repo, date):
     """Find the tipmost changeset that matches the given date spec"""
 
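The lookup order gettemplate() codifies is: explicit template/style arguments first, then ui.logtemplate and ui.style, then sniffing of the spec itself (literal, stock style, [templates] alias, path). Two illustrative calls, assuming a ui object is in scope; return values follow from the code above:

    # a '{' anywhere marks the spec as a literal template string
    tmpl, mapfile = gettemplate(ui, '{rev}: {desc|firstline}\n', None)
    # -> ('{rev}: {desc|firstline}\n', None)

    # a bare style name resolves through the stock map files
    tmpl, mapfile = gettemplate(ui, None, 'compact')
    # -> (None, path to map-cmdline.compact) when that map file exists

show_changeset() then only has to branch on the (tmpl, mapfile) pair, which is why the old use_template() two-step on changeset_templater could be folded into its constructor.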
@@ -995,19 +1167,11 @@ def finddate(ui, repo, date):
 
     raise util.Abort(_("revision matching date not found"))
 
-def increasingwindows(start, end, windowsize=8, sizelimit=512):
-    if start < end:
-        while start < end:
-            yield start, min(windowsize, end - start)
-            start += windowsize
-            if windowsize < sizelimit:
-                windowsize *= 2
-    else:
-        while start > end:
-            yield start, min(windowsize, start - end - 1)
-            start -= windowsize
-            if windowsize < sizelimit:
-                windowsize *= 2
+def increasingwindows(windowsize=8, sizelimit=512):
+    while True:
+        yield windowsize
+        if windowsize < sizelimit:
+            windowsize *= 2
 
 class FileWalkError(Exception):
     pass
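A quick check of the new generator's behaviour: it just doubles the window up to the cap and then stays there, leaving the iteration cursor to the caller instead of taking start/end and directions itself:

    from itertools import islice
    from mercurial.cmdutil import increasingwindows

    print list(islice(increasingwindows(), 9))
    # [8, 16, 32, 64, 128, 256, 512, 512, 512]

Dropping the positional bookkeeping is what lets walkchangerevs below consume lazy revset iterators instead of indexable lists.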
@@ -1132,7 +1296,7 b' def walkchangerevs(repo, match, opts, pr' | |||||
1132 | elif follow: |
|
1296 | elif follow: | |
1133 | revs = repo.revs('reverse(:.)') |
|
1297 | revs = repo.revs('reverse(:.)') | |
1134 | else: |
|
1298 | else: | |
1135 |
revs = |
|
1299 | revs = revset.spanset(repo) | |
1136 | revs.reverse() |
|
1300 | revs.reverse() | |
1137 | if not revs: |
|
1301 | if not revs: | |
1138 | return [] |
|
1302 | return [] | |
@@ -1148,7 +1312,7 b' def walkchangerevs(repo, match, opts, pr' | |||||
1148 |
|
1312 | |||
1149 | if not slowpath and not match.files(): |
|
1313 | if not slowpath and not match.files(): | |
1150 | # No files, no patterns. Display all revs. |
|
1314 | # No files, no patterns. Display all revs. | |
1151 |
wanted = |
|
1315 | wanted = revs | |
1152 |
|
1316 | |||
1153 | if not slowpath and match.files(): |
|
1317 | if not slowpath and match.files(): | |
1154 | # We only have to read through the filelog to find wanted revisions |
|
1318 | # We only have to read through the filelog to find wanted revisions | |
@@ -1250,14 +1414,7 b' def walkchangerevs(repo, match, opts, pr' | |||||
1250 | stop = min(revs[0], revs[-1]) |
|
1414 | stop = min(revs[0], revs[-1]) | |
1251 | for x in xrange(rev, stop - 1, -1): |
|
1415 | for x in xrange(rev, stop - 1, -1): | |
1252 | if ff.match(x): |
|
1416 | if ff.match(x): | |
1253 |
wanted |
|
1417 | wanted = wanted - [x] | |
1254 |
|
||||
1255 | # Choose a small initial window if we will probably only visit a |
|
|||
1256 | # few commits. |
|
|||
1257 | limit = loglimit(opts) |
|
|||
1258 | windowsize = 8 |
|
|||
1259 | if limit: |
|
|||
1260 | windowsize = min(limit, windowsize) |
|
|||
1261 |
|
1418 | |||
1262 | # Now that wanted is correctly initialized, we can iterate over the |
|
1419 | # Now that wanted is correctly initialized, we can iterate over the | |
1263 | # revision range, yielding only revisions in wanted. |
|
1420 | # revision range, yielding only revisions in wanted. | |
@@ -1270,8 +1427,18 @@ def walkchangerevs(repo, match, opts, pr
             def want(rev):
                 return rev in wanted

-        for i, window in increasingwindows(0, len(revs), windowsize):
-            nrevs = [rev for rev in revs[i:i + window] if want(rev)]
+        it = iter(revs)
+        stopiteration = False
+        for windowsize in increasingwindows():
+            nrevs = []
+            for i in xrange(windowsize):
+                try:
+                    rev = it.next()
+                    if want(rev):
+                        nrevs.append(rev)
+                except (StopIteration):
+                    stopiteration = True
+                    break
             for rev in sorted(nrevs):
                 fns = fncache.get(rev)
                 ctx = change(rev)
@@ -1284,9 +1451,13 @@ def walkchangerevs(repo, match, opts, pr
                 prepare(ctx, fns)
             for rev in nrevs:
                 yield change(rev)
+
+            if stopiteration:
+                break
+
     return iterate()

-def _makegraphfilematcher(repo, pats, followfirst):
+def _makelogfilematcher(repo, pats, followfirst):
     # When displaying a revision with --patch --follow FILE, we have
     # to know which file of the revision must be diffed. With
     # --follow, we want the names of the ancestors of FILE in the
@@ -1314,7 +1485,7 @@ def _makegraphfilematcher(repo, pats, fo

     return filematcher

-def _makegraphlogrevset(repo, pats, opts, revs):
+def _makelogrevset(repo, pats, opts, revs):
     """Return (expr, filematcher) where expr is a revset string built
     from log options and file patterns or None. If --stat or --patch
     are not passed filematcher is None. Otherwise it is a callable
@@ -1344,8 +1515,12 @@ def _makegraphlogrevset(repo, pats, opts
     follow = opts.get('follow') or opts.get('follow_first')
     followfirst = opts.get('follow_first') and 1 or 0
     # --follow with FILE behaviour depends on revs...
-    startrev = revs[0]
-    followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0
+    it = iter(revs)
+    startrev = it.next()
+    try:
+        followdescendants = startrev < it.next()
+    except (StopIteration):
+        followdescendants = False

     # branch and only_branch are really aliases and must be handled at
     # the same time
@@ -1421,7 +1596,7 @@ def _makegraphlogrevset(repo, pats, opts
     filematcher = None
     if opts.get('patch') or opts.get('stat'):
         if follow:
-            filematcher = _makegraphfilematcher(repo, pats, followfirst)
+            filematcher = _makelogfilematcher(repo, pats, followfirst)
         else:
             filematcher = lambda rev: match

@@ -1464,18 +1639,18 @@ def getgraphlogrevs(repo, pats, opts):
     possiblyunsorted = False # whether revs might need sorting
     if opts.get('rev'):
         revs = scmutil.revrange(repo, opts['rev'])
-        # Don't sort here because _makegraphlogrevset might depend on the
+        # Don't sort here because _makelogrevset might depend on the
         # order of revs
         possiblyunsorted = True
     else:
         if follow and len(repo) > 0:
             revs = repo.revs('reverse(:.)')
         else:
-            revs = list(repo)
+            revs = revset.spanset(repo)
             revs.reverse()
     if not revs:
-        return [], None, None
-    expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs)
+        return revset.baseset(), None, None
+    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
     if possiblyunsorted:
         revs.sort(reverse=True)
     if expr:
@@ -1489,7 +1664,60 @@ def getgraphlogrevs(repo, pats, opts):
         revs = matcher(repo, revs)
         revs.sort(reverse=True)
     if limit is not None:
-        revs = revs[:limit]
+        limitedrevs = revset.baseset()
+        for idx, rev in enumerate(revs):
+            if idx >= limit:
+                break
+            limitedrevs.append(rev)
+        revs = limitedrevs
+
+    return revs, expr, filematcher
+
+def getlogrevs(repo, pats, opts):
+    """Return (revs, expr, filematcher) where revs is an iterable of
+    revision numbers, expr is a revset string built from log options
+    and file patterns or None, and used to filter 'revs'. If --stat or
+    --patch are not passed filematcher is None. Otherwise it is a
+    callable taking a revision number and returning a match object
+    filtering the files to be detailed when displaying the revision.
+    """
+    limit = loglimit(opts)
+    # Default --rev value depends on --follow but --follow behaviour
+    # depends on revisions resolved from --rev...
+    follow = opts.get('follow') or opts.get('follow_first')
+    if opts.get('rev'):
+        revs = scmutil.revrange(repo, opts['rev'])
+    elif follow:
+        revs = revset.baseset(repo.revs('reverse(:.)'))
+    else:
+        revs = revset.spanset(repo)
+        revs.reverse()
+    if not revs:
+        return revset.baseset([]), None, None
+    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
+    if expr:
+        # Revset matchers often operate faster on revisions in changelog
+        # order, because most filters deal with the changelog.
+        if not opts.get('rev'):
+            revs.reverse()
+        matcher = revset.match(repo.ui, expr)
+        # Revset matches can reorder revisions. "A or B" typically
+        # returns the revision matching A then the revision matching B. Sort
+        # again to fix that.
+        revs = matcher(repo, revs)
+        if not opts.get('rev'):
+            revs.sort(reverse=True)
+    if limit is not None:
+        count = 0
+        limitedrevs = revset.baseset([])
+        it = iter(revs)
+        while count < limit:
+            try:
+                limitedrevs.append(it.next())
+            except (StopIteration):
+                break
+            count += 1
+        revs = limitedrevs

     return revs, expr, filematcher

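Both limit loops in this hunk implement the same idiom: materialize at most `limit` revisions from a lazily evaluated revset (a spanset can cover the whole repository) without walking past what is needed. The same idea on a plain iterable, as a sketch in modern Python (a list stands in for revset.baseset):

    def takelimit(revs, limit):
        # drain at most `limit` items from a possibly huge lazy iterable
        limited = []
        it = iter(revs)
        for _ in range(limit):
            try:
                limited.append(next(it))
            except StopIteration:
                break
        return limited

    print(takelimit(range(10**9), 5))   # [0, 1, 2, 3, 4], no full walk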
@@ -1531,7 +1759,7 @@ def graphlog(ui, repo, *pats, **opts):
     if opts.get('copies'):
         endrev = None
         if opts.get('rev'):
-            endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
+            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
         getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
     displayer = show_changeset(ui, repo, opts, buffered=True)
     showparents = [ctx.node() for ctx in repo[None].parents()]
@@ -1632,6 +1860,59 @@ def forget(ui, repo, match, prefix, expl
         forgot.extend(forget)
     return bad, forgot

+def cat(ui, repo, ctx, matcher, prefix, **opts):
+    err = 1
+
+    def write(path):
+        fp = makefileobj(repo, opts.get('output'), ctx.node(),
+                         pathname=os.path.join(prefix, path))
+        data = ctx[path].data()
+        if opts.get('decode'):
+            data = repo.wwritedata(path, data)
+        fp.write(data)
+        fp.close()
+
+    # Automation often uses hg cat on single files, so special case it
+    # for performance to avoid the cost of parsing the manifest.
+    if len(matcher.files()) == 1 and not matcher.anypats():
+        file = matcher.files()[0]
+        mf = repo.manifest
+        mfnode = ctx._changeset[0]
+        if mf.find(mfnode, file)[0]:
+            write(file)
+            return 0
+
+    # Don't warn about "missing" files that are really in subrepos
+    bad = matcher.bad
+
+    def badfn(path, msg):
+        for subpath in ctx.substate:
+            if path.startswith(subpath):
+                return
+        bad(path, msg)
+
+    matcher.bad = badfn
+
+    for abs in ctx.walk(matcher):
+        write(abs)
+        err = 0
+
+    matcher.bad = bad
+
+    for subpath in sorted(ctx.substate):
+        sub = ctx.sub(subpath)
+        try:
+            submatch = matchmod.narrowmatcher(subpath, matcher)
+
+            if not sub.cat(ui, submatch, os.path.join(prefix, sub._path),
+                           **opts):
+                err = 0
+        except error.RepoLookupError:
+            ui.status(_("skipping missing subrepository: %s\n")
+                      % os.path.join(prefix, subpath))
+
+    return err
+
 def duplicatecopies(repo, rev, fromrev):
     '''reproduce copies from fromrev to rev in the dirstate'''
     for dst, src in copies.pathcopies(repo[fromrev], repo[rev]).iteritems():
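Two details of the new helper deserve a note: the single-file case skips walking the whole context, and matcher.bad is swapped out during the walk so files that really live in subrepos are not reported as missing. A standalone sketch of that swap-and-restore pattern (the Matcher class and substate table below are stand-ins, not Mercurial APIs):

    class Matcher(object):
        def __init__(self):
            self.bad = lambda path, msg: print('warning: %s: %s' % (path, msg))

    m = Matcher()
    substate = {'vendored/': None}        # hypothetical subrepo table

    origbad = m.bad
    def badfn(path, msg):
        # stay quiet for paths that belong to a subrepo
        for subpath in substate:
            if path.startswith(subpath):
                return
        origbad(path, msg)

    m.bad = badfn
    m.bad('vendored/lib.c', 'no such file')   # suppressed
    m.bad('missing.c', 'no such file')        # still warns
    m.bad = origbad                           # restore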
@@ -1768,6 +2049,8 @@ def amend(ui, repo, commitfunc, old, ext
     if not message:
         editmsg = True
         message = old.description()
+    elif opts.get('edit'):
+        editmsg = True

     pureextra = extra.copy()
     extra['amend_source'] = old.hex()
@@ -1802,10 +2085,10 @@ def amend(ui, repo, commitfunc, old, ext
                 commitphase = 'secret'
             else:
                 commitphase = old.phase()
-            repo.ui.setconfig('phases', 'new-commit', commitphase)
+            repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
             newid = repo.commitctx(new)
         finally:
-            repo.ui.setconfig('phases', 'new-commit', ph)
+            repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
         if newid != old.node():
             # Reroute the working copy parent to the new changeset
             repo.setparents(newid, nullid)
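This hunk shows a pattern repeated throughout the series: ui.setconfig gains a trailing argument naming the component responsible for the override ('amend' here; 'commit', 'backout' and 'graft' below), so configuration introspection can attribute each value to its source. A toy model of the idea, not Mercurial's ui class:

    class Ui(object):
        def __init__(self):
            self._cfg = {}

        def setconfig(self, section, name, value, source=''):
            # remember who set the value, not just the value itself
            self._cfg[(section, name)] = (value, source)

        def configsource(self, section, name):
            return self._cfg[(section, name)][1]

    ui = Ui()
    ui.setconfig('phases', 'new-commit', 'secret', 'amend')
    print(ui.configsource('phases', 'new-commit'))   # amend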
@@ -1875,7 +2158,7 @@ def commitforceeditor(repo, ctx, subs):
     # run editor in the repository root
     olddir = os.getcwd()
     os.chdir(repo.root)
-    text = repo.ui.edit("\n".join(edittext), ctx.user())
+    text = repo.ui.edit("\n".join(edittext), ctx.user(), ctx.extra())
     text = re.sub("(?m)^HG:.*(\n|$)", "", text)
     os.chdir(olddir)

@@ -2062,54 +2345,8 @@ def revert(ui, repo, ctx, parents, *pats
             handle(revert, False)
         else:
             handle(remove, False)
-
     if not opts.get('dry_run'):
-        def checkout(f):
-            fc = ctx[f]
-            repo.wwrite(f, fc.data(), fc.flags())
-
-        audit_path = pathutil.pathauditor(repo.root)
-        for f in remove[0]:
-            if repo.dirstate[f] == 'a':
-                repo.dirstate.drop(f)
-                continue
-            audit_path(f)
-            try:
-                util.unlinkpath(repo.wjoin(f))
-            except OSError:
-                pass
-            repo.dirstate.remove(f)
-
-        normal = None
-        if node == parent:
-            # We're reverting to our parent. If possible, we'd like status
-            # to report the file as clean. We have to use normallookup for
-            # merges to avoid losing information about merged/dirty files.
-            if p2 != nullid:
-                normal = repo.dirstate.normallookup
-            else:
-                normal = repo.dirstate.normal
-        for f in revert[0]:
-            checkout(f)
-            if normal:
-                normal(f)
-
-        for f in add[0]:
-            checkout(f)
-            repo.dirstate.add(f)
-
-        normal = repo.dirstate.normallookup
-        if node == parent and p2 == nullid:
-            normal = repo.dirstate.normal
-        for f in undelete[0]:
-            checkout(f)
-            normal(f)
-
-        copied = copies.pathcopies(repo[parent], ctx)
-
-        for f in add[0] + undelete[0] + revert[0]:
-            if f in copied:
-                repo.dirstate.copy(copied[f], f)
-
+        _performrevert(repo, parents, ctx, revert, add, remove, undelete)
+
     if targetsubs:
         # Revert the subrepos on the revert list
@@ -2118,6 +2355,63 @@ def revert(ui, repo, ctx, parents, *pats
     finally:
         wlock.release()

+def _performrevert(repo, parents, ctx, revert, add, remove, undelete):
+    """function that actually performs all the actions computed for revert
+
+    This is an independent function to let extensions plug in and react to
+    the imminent revert.
+
+    Make sure you have the working directory locked when calling this function.
+    """
+    parent, p2 = parents
+    node = ctx.node()
+    def checkout(f):
+        fc = ctx[f]
+        repo.wwrite(f, fc.data(), fc.flags())
+
+    audit_path = pathutil.pathauditor(repo.root)
+    for f in remove[0]:
+        if repo.dirstate[f] == 'a':
+            repo.dirstate.drop(f)
+            continue
+        audit_path(f)
+        try:
+            util.unlinkpath(repo.wjoin(f))
+        except OSError:
+            pass
+        repo.dirstate.remove(f)
+
+    normal = None
+    if node == parent:
+        # We're reverting to our parent. If possible, we'd like status
+        # to report the file as clean. We have to use normallookup for
+        # merges to avoid losing information about merged/dirty files.
+        if p2 != nullid:
+            normal = repo.dirstate.normallookup
+        else:
+            normal = repo.dirstate.normal
+    for f in revert[0]:
+        checkout(f)
+        if normal:
+            normal(f)
+
+    for f in add[0]:
+        checkout(f)
+        repo.dirstate.add(f)
+
+    normal = repo.dirstate.normallookup
+    if node == parent and p2 == nullid:
+        normal = repo.dirstate.normal
+    for f in undelete[0]:
+        checkout(f)
+        normal(f)
+
+    copied = copies.pathcopies(repo[parent], ctx)
+
+    for f in add[0] + undelete[0] + revert[0]:
+        if f in copied:
+            repo.dirstate.copy(copied[f], f)
+
 def command(table):
     '''returns a function object bound to table which can be used as
     a decorator for populating table as a command table'''
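As the docstring says, the split exists so extensions can react just before files are touched. A hedged sketch of how an extension might hook in through mercurial.extensions.wrapfunction (a real helper whose wrapper receives the original function first; the logging body is illustrative):

    from mercurial import cmdutil, extensions

    def reactingrevert(orig, repo, parents, ctx,
                       revert, add, remove, undelete):
        # runs before the working directory is modified
        repo.ui.note('about to revert %d file(s)\n' % len(revert[0]))
        return orig(repo, parents, ctx, revert, add, remove, undelete)

    def extsetup(ui):
        extensions.wrapfunction(cmdutil, '_performrevert', reactingrevert)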
@@ -2133,9 +2427,24 @@ def command(table):

     return cmd

+# a list of (ui, repo, otherpeer, opts, missing) functions called by
+# commands.outgoing. "missing" is "missing" of the result of
+# "findcommonoutgoing()"
+outgoinghooks = util.hooks()
+
 # a list of (ui, repo) functions called by commands.summary
 summaryhooks = util.hooks()

+# a list of (ui, repo, opts, changes) functions called by commands.summary.
+#
+# functions should return tuple of booleans below, if 'changes' is None:
+#     (whether-incomings-are-needed, whether-outgoings-are-needed)
+#
+# otherwise, 'changes' is a tuple of tuples below:
+#     - (sourceurl, sourcebranch, sourcepeer, incoming)
+#     - (desturl, destbranch, destpeer, outgoing)
+summaryremotehooks = util.hooks()
+
 # A list of state files kept by multistep operations like graft.
 # Since graft cannot be aborted, it is considered 'clearable' by update.
 # note: bisect is intentionally excluded
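util.hooks() is a small ordered callback registry: extensions register with .add(source, hook) and the command later calls the whole list. A hedged sketch of registering one of these hooks from an extension (the hook body is illustrative only):

    from mercurial import cmdutil

    def mysummaryhook(ui, repo):
        # contribute one extra line to `hg summary` output
        ui.write('parents: %d\n' % len(repo[None].parents()))

    def uisetup(ui):
        cmdutil.summaryhooks.add('myext', mysummaryhook)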
This diff has been collapsed as it changes many lines (643 lines changed).
@@ -9,6 +9,7 @@ from node import hex, bin, nullid, nullr
 from lock import release
 from i18n import _
 import os, re, difflib, time, tempfile, errno
+import sys
 import hg, scmutil, util, revlog, copies, error, bookmarks
 import patch, help, encoding, templatekw, discovery
 import archival, changegroup, cmdutil, hbisect
@@ -19,7 +20,7 @@ import minirst, revset, fileset
 import dagparser, context, simplemerge, graphmod
 import random
 import setdiscovery, treediscovery, dagutil, pvec, localrepo
-import phases, obsolete
+import phases, obsolete, exchange

 table = {}

@@ -89,8 +90,8 @@ commitopts2 = [

 templateopts = [
     ('', 'style', '',
-     _('display using template map file'), _('STYLE')),
-    ('', 'template', '',
+     _('display using template map file (DEPRECATED)'), _('STYLE')),
+    ('T', 'template', '',
      _('display with template'), _('TEMPLATE')),
 ]

@@ -437,9 +438,8 @@ def backout(ui, repo, node=None, rev=Non
         node = scmutil.revsingle(repo, rev).node()

     op1, op2 = repo.dirstate.parents()
-    a = repo.changelog.ancestor(op1, node)
-    if a != node:
-        raise util.Abort(_('cannot backout change on a different branch'))
+    if node not in repo.changelog.commonancestorsheads(op1, node):
+        raise util.Abort(_('cannot backout change that is not an ancestor'))

     p1, p2 = repo.changelog.parents(node)
     if p1 == nullid:
@@ -465,7 +465,8 @@ def backout(ui, repo, node=None, rev=Non
             rctx = scmutil.revsingle(repo, hex(parent))
             if not opts.get('merge') and op1 != node:
                 try:
-                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                                 'backout')
                     stats = mergemod.update(repo, parent, True, True, False,
                                             node, False)
                     repo.setparents(op1, op2)
@@ -479,7 +480,7 @@ def backout(ui, repo, node=None, rev=Non
                     ui.status(msg % short(node))
                     return stats[3] > 0
                 finally:
-                    ui.setconfig('ui', 'forcemerge', '')
+                    ui.setconfig('ui', 'forcemerge', '', '')
             else:
                 hg.clean(repo, node, show_stats=False)
                 repo.dirstate.setbranch(branch)
@@ -510,10 +511,11 @@ def backout(ui, repo, node=None, rev=Non
                 ui.status(_('merging with changeset %s\n')
                           % nice(repo.changelog.tip()))
                 try:
-                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                                 'backout')
                     return hg.merge(repo, hex(repo.changelog.tip()))
                 finally:
-                    ui.setconfig('ui', 'forcemerge', '')
+                    ui.setconfig('ui', 'forcemerge', '', '')
     finally:
         wlock.release()
     return 0
@@ -1126,8 +1128,8 @@ def bundle(ui, repo, fname, dest=None, *
                                "a destination"))
         common = [repo.lookup(rev) for rev in base]
         heads = revs and map(repo.lookup, revs) or revs
-        cg = repo.getbundle('bundle', heads=heads, common=common,
-                            bundlecaps=bundlecaps)
+        cg = changegroup.getbundle(repo, 'bundle', heads=heads, common=common,
+                                   bundlecaps=bundlecaps)
         outgoing = None
     else:
         dest = ui.expandpath(dest or 'default-push', dest or 'default')
@@ -1139,7 +1141,7 @@ def bundle(ui, repo, fname, dest=None, *
                                          onlyheads=heads,
                                          force=opts.get('force'),
                                          portable=True)
-        cg = repo.getlocalbundle('bundle', outgoing, bundlecaps)
+        cg = changegroup.getlocalbundle(repo, 'bundle', outgoing, bundlecaps)
     if not cg:
         scmutil.nochangesfound(ui, repo, outgoing and outgoing.excluded)
         return 1
@@ -1160,42 +1162,24 @@ def cat(ui, repo, file1, *pats, **opts):
     no revision is given, the parent of the working directory is used.

     Output may be to a file, in which case the name of the file is
-    given using a format string. The formatting rules are the same as
-    for the export command, with the following additions:
+    given using a format string. The formatting rules are as follows:

+    :``%%``: literal "%" character
     :``%s``: basename of file being printed
     :``%d``: dirname of file being printed, or '.' if in repository root
     :``%p``: root-relative path name of file being printed
+    :``%H``: changeset hash (40 hexadecimal digits)
+    :``%R``: changeset revision number
+    :``%h``: short-form changeset hash (12 hexadecimal digits)
+    :``%r``: zero-padded changeset revision number
+    :``%b``: basename of the exporting repository

     Returns 0 on success.
     """
     ctx = scmutil.revsingle(repo, opts.get('rev'))
-    err = 1
     m = scmutil.match(ctx, (file1,) + pats, opts)

-    def write(path):
-        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
-                                 pathname=path)
-        data = ctx[path].data()
-        if opts.get('decode'):
-            data = repo.wwritedata(path, data)
-        fp.write(data)
-        fp.close()
-
-    # Automation often uses hg cat on single files, so special case it
-    # for performance to avoid the cost of parsing the manifest.
-    if len(m.files()) == 1 and not m.anypats():
-        file = m.files()[0]
-        mf = repo.manifest
-        mfnode = ctx._changeset[0]
-        if mf.find(mfnode, file)[0]:
-            write(file)
-            return 0
-
-    for abs in ctx.walk(m):
-        write(abs)
-        err = 0
-    return err
+    return cmdutil.cat(ui, repo, ctx, m, '', **opts)

 @command('^clone',
     [('U', 'noupdate', None,
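With the new keys a single hg cat invocation can name its output files after the changeset being read; for example (the directory layout is illustrative):

    $ hg cat -r . -o 'export/%h/%p' setup.py

writes the revision's setup.py to export/<short hash>/setup.py.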
@@ -1322,6 +1306,8 @@ def clone(ui, source, dest=None, **opts)
      _('mark a branch as closed, hiding it from the branch list')),
     ('', 'amend', None, _('amend the parent of the working dir')),
     ('s', 'secret', None, _('use the secret phase for committing')),
+    ('e', 'edit', None,
+     _('further edit commit message already specified')),
     ] + walkopts + commitopts + commitopts2 + subrepoopts,
     _('[OPTION]... [FILE]...'))
 def commit(ui, repo, *pats, **opts):
@@ -1360,11 +1346,13 @@ def commit(ui, repo, *pats, **opts):

     Returns 0 on success, 1 if nothing changed.
     """
+    forceeditor = opts.get('edit')
+
     if opts.get('subrepos'):
         if opts.get('amend'):
             raise util.Abort(_('cannot amend with --subrepos'))
         # Let --subrepos on the command line override config setting.
-        ui.setconfig('ui', 'commitsubrepos', True)
+        ui.setconfig('ui', 'commitsubrepos', True, 'commit')

     # Save this for restoring it later
     oldcommitphase = ui.config('phases', 'new-commit')
@@ -1397,23 +1385,12 @@ def commit(ui, repo, *pats, **opts):
         if (not obsolete._enabled) and old.children():
             raise util.Abort(_('cannot amend changeset with children'))

-        e = cmdutil.commiteditor
-        if opts.get('force_editor'):
-            e = cmdutil.commitforceeditor
-
         # commitfunc is used only for temporary amend commit by cmdutil.amend
         def commitfunc(ui, repo, message, match, opts):
-            editor = e
-            # message contains text from -m or -l, if it's empty,
-            # open the editor with the old message
-            if not message:
-                message = old.description()
-                editor = cmdutil.commitforceeditor
             return repo.commit(message,
                                opts.get('user') or old.user(),
                                opts.get('date') or old.date(),
                                match,
-                               editor=editor,
                                extra=extra)

         current = repo._bookmarkcurrent
@@ -1433,21 +1410,23 @@ def commit(ui, repo, *pats, **opts):
             newmarks.write()
     else:
         e = cmdutil.commiteditor
-        if opts.get('force_editor'):
+        if forceeditor:
             e = cmdutil.commitforceeditor

         def commitfunc(ui, repo, message, match, opts):
             try:
                 if opts.get('secret'):
-                    ui.setconfig('phases', 'new-commit', 'secret')
+                    ui.setconfig('phases', 'new-commit', 'secret', 'commit')
                     # Propagate to subrepos
-                    repo.baseui.setconfig('phases', 'new-commit', 'secret')
+                    repo.baseui.setconfig('phases', 'new-commit', 'secret',
+                                          'commit')

                 return repo.commit(message, opts.get('user'), opts.get('date'),
                                    match, editor=e, extra=extra)
             finally:
-                ui.setconfig('phases', 'new-commit', oldcommitphase)
-                repo.baseui.setconfig('phases', 'new-commit', oldcommitphase)
+                ui.setconfig('phases', 'new-commit', oldcommitphase, 'commit')
+                repo.baseui.setconfig('phases', 'new-commit', oldcommitphase,
+                                      'commit')


     node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
@@ -1463,6 +1442,103 @@ def commit(ui, repo, *pats, **opts):

     cmdutil.commitstatus(repo, node, branch, bheads, opts)

+@command('config|showconfig|debugconfig',
+    [('u', 'untrusted', None, _('show untrusted configuration options')),
+     ('e', 'edit', None, _('edit user config')),
+     ('l', 'local', None, _('edit repository config')),
+     ('g', 'global', None, _('edit global config'))],
+    _('[-u] [NAME]...'))
+def config(ui, repo, *values, **opts):
+    """show combined config settings from all hgrc files
+
+    With no arguments, print names and values of all config items.
+
+    With one argument of the form section.name, print just the value
+    of that config item.
+
+    With multiple arguments, print names and values of all config
+    items with matching section names.
+
+    With --edit, start an editor on the user-level config file. With
+    --global, edit the system-wide config file. With --local, edit the
+    repository-level config file.
+
+    With --debug, the source (filename and line number) is printed
+    for each config item.
+
+    See :hg:`help config` for more information about config files.
+
+    Returns 0 on success.
+
+    """
+
+    if opts.get('edit') or opts.get('local') or opts.get('global'):
+        if opts.get('local') and opts.get('global'):
+            raise util.Abort(_("can't use --local and --global together"))
+
+        if opts.get('local'):
+            if not repo:
+                raise util.Abort(_("can't use --local outside a repository"))
+            paths = [repo.join('hgrc')]
+        elif opts.get('global'):
+            paths = scmutil.systemrcpath()
+        else:
+            paths = scmutil.userrcpath()
+
+        for f in paths:
+            if os.path.exists(f):
+                break
+        else:
+            f = paths[0]
+            fp = open(f, "w")
+            fp.write(
+                '# example config (see "hg help config" for more info)\n'
+                '\n'
+                '[ui]\n'
+                '# name and email, e.g.\n'
+                '# username = Jane Doe <jdoe@example.com>\n'
+                'username =\n'
+                '\n'
+                '[extensions]\n'
+                '# uncomment these lines to enable some popular extensions\n'
+                '# (see "hg help extensions" for more info)\n'
+                '# pager =\n'
+                '# progress =\n'
+                '# color =\n')
+            fp.close()
+
+        editor = ui.geteditor()
+        util.system("%s \"%s\"" % (editor, f),
+                    onerr=util.Abort, errprefix=_("edit failed"),
+                    out=ui.fout)
+        return
+
+    for f in scmutil.rcpath():
+        ui.debug('read config from: %s\n' % f)
+    untrusted = bool(opts.get('untrusted'))
+    if values:
+        sections = [v for v in values if '.' not in v]
+        items = [v for v in values if '.' in v]
+        if len(items) > 1 or items and sections:
+            raise util.Abort(_('only one config item permitted'))
+    for section, name, value in ui.walkconfig(untrusted=untrusted):
+        value = str(value).replace('\n', '\\n')
+        sectname = section + '.' + name
+        if values:
+            for v in values:
+                if v == section:
+                    ui.debug('%s: ' %
+                             ui.configsource(section, name, untrusted))
+                    ui.write('%s=%s\n' % (sectname, value))
+                elif v == sectname:
+                    ui.debug('%s: ' %
+                             ui.configsource(section, name, untrusted))
+                    ui.write(value, '\n')
+        else:
+            ui.debug('%s: ' %
+                     ui.configsource(section, name, untrusted))
+            ui.write('%s=%s\n' % (sectname, value))
+
 @command('copy|cp',
     [('A', 'after', None, _('record a copy that has already occurred')),
     ('f', 'force', None, _('forcibly copy over an existing managed file')),
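Usage, as implied by the code above: a dotted argument prints just that value, a bare section name prints name=value lines, and the edit flags open (seeding, if missing) the chosen file:

    $ hg config ui.username     # single item: value only
    $ hg config ui              # whole section: name=value lines
    $ hg config --edit          # user config, created from a template if absent
    $ hg config --local         # this repository's .hg/hgrc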
@@ -1665,7 +1741,7 @@ def debugbundle(ui, bundlepath, all=None
     """lists the contents of a bundle"""
     f = hg.openpath(ui, bundlepath)
     try:
-        gen = changegroup.readbundle(f, bundlepath)
+        gen = exchange.readbundle(ui, f, bundlepath)
         if all:
             ui.write(("format: id, p1, p2, cset, delta base, len(delta)\n"))

@@ -1945,7 +2021,7 @@ def debugfileset(ui, repo, expr, **opts)
     tree = fileset.parse(expr)[0]
     ui.note(tree, "\n")

-    for f in fileset.getfileset(ctx, expr):
+    for f in ctx.getfileset(expr):
         ui.write("%s\n" % f)

 @command('debugfsinfo', [], _('[PATH]'))
@@ -2089,7 +2165,10 @@ def debuginstall(ui):
         ui.write(_(" (check that your locale is properly set)\n"))
         problems += 1

-    # Python lib
+    # Python
+    ui.status(_("checking Python executable (%s)\n") % sys.executable)
+    ui.status(_("checking Python version (%s)\n")
+              % ("%s.%s.%s" % sys.version_info[:3]))
     ui.status(_("checking Python lib (%s)...\n")
               % os.path.dirname(os.__file__))

@@ -2109,10 +2188,21 @@ def debuginstall(ui):
     import templater
     p = templater.templatepath()
     ui.status(_("checking templates (%s)...\n") % ' '.join(p))
-    try:
-        templater.templater(templater.templatepath("map-cmdline.default"))
-    except Exception, inst:
-        ui.write(" %s\n" % inst)
+    if p:
+        m = templater.templatepath("map-cmdline.default")
+        if m:
+            # template found, check if it is working
+            try:
+                templater.templater(m)
+            except Exception, inst:
+                ui.write(" %s\n" % inst)
+                p = None
+        else:
+            ui.write(_(" template 'default' not found\n"))
+            p = None
+    else:
+        ui.write(_(" no template directories found\n"))
+    if not p:
         ui.write(_(" (templates seem to have been installed incorrectly)\n"))
         problems += 1

@@ -2218,14 +2308,7 @@ def debugobsolete(ui, repo, precursor=No
             l.release()
     else:
         for m in obsolete.allmarkers(repo):
-            ui.write(hex(m.precnode()))
-            for repl in m.succnodes():
-                ui.write(' ')
-                ui.write(hex(repl))
-            ui.write(' %X ' % m._data[2])
-            ui.write('{%s}' % (', '.join('%r: %r' % t for t in
-                                         sorted(m.metadata().items()))))
-            ui.write('\n')
+            cmdutil.showmarker(ui, m)

 @command('debugpathcomplete',
     [('f', 'full', None, _('complete an entire path')),
@@ -2384,7 +2467,7 @@ def debugrevlog(ui, repo, file_=None, **

     if opts.get("dump"):
         numrevs = len(r)
-        ui.write("# rev p1rev p2rev start end deltastart base p1 p2"
+        ui.write("# rev p1rev p2rev start   end deltastart base   p1   p2"
                  " rawsize totalsize compression heads\n")
         ts = 0
         heads = set()
@@ -2398,7 +2481,7 @@ def debugrevlog(ui, repo, file_=None, **
             ts = ts + rs
             heads -= set(r.parentrevs(rev))
             heads.add(rev)
-            ui.write("%d %d %d %d %d %d %d %d %d %d %d %d %d\n" %
+            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d %11d %5d\n" %
                      (rev, p1, p2, r.start(rev), r.end(rev),
                       r.start(dbase), r.start(cbase),
                       r.start(p1), r.start(p2),
@@ -2546,8 +2629,10 @@ def debugrevlog(ui, repo, file_=None, **
     ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                        numdeltas))

-@command('debugrevspec', [], ('REVSPEC'))
-def debugrevspec(ui, repo, expr):
+@command('debugrevspec',
+    [('', 'optimize', None, _('print parsed tree after optimizing'))],
+    ('REVSPEC'))
+def debugrevspec(ui, repo, expr, **opts):
     """parse and apply a revision specification

     Use --verbose to print the parsed tree before and after aliases
@@ -2559,8 +2644,11 @@ def debugrevspec(ui, repo, expr):
     newtree = revset.findaliases(ui, tree)
     if newtree != tree:
         ui.note(revset.prettyformat(newtree), "\n")
+    if opts["optimize"]:
+        weight, optimizedtree = revset.optimize(newtree, True)
+        ui.note("* optimized:\n", revset.prettyformat(optimizedtree), "\n")
     func = revset.match(ui, expr)
-    for c in func(repo, range(len(repo))):
+    for c in func(repo, revset.spanset(repo)):
         ui.write("%s\n" % c)

 @command('debugsetparents', [], _('REV1 [REV2]'))
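Because both trees are printed with ui.note, the new flag only produces extra output together with --verbose, e.g.:

    $ hg debugrevspec --verbose --optimize 'branch(default) and not merge()'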
@@ -3090,11 +3178,12 @@ def graft(ui, repo, *revs, **opts):
         # perform the graft merge with p1(rev) as 'ancestor'
         try:
             # ui.forcemerge is an internal variable, do not document
-            repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+            repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                              'graft')
             stats = mergemod.update(repo, ctx.node(), True, True, False,
                                     ctx.p1().node())
         finally:
-            repo.ui.setconfig('ui', 'forcemerge', '')
+            repo.ui.setconfig('ui', 'forcemerge', '', 'graft')
         # report any conflicts
         if stats and stats[3] > 0:
             # write out state for --continue
@@ -3204,6 +3293,20 @@ def grep(ui, repo, pattern, *pats, **opt
         def __eq__(self, other):
             return self.line == other.line

+        def __iter__(self):
+            yield (self.line[:self.colstart], '')
+            yield (self.line[self.colstart:self.colend], 'grep.match')
+            rest = self.line[self.colend:]
+            while rest != '':
+                match = regexp.search(rest)
+                if not match:
+                    yield (rest, '')
+                    break
+                mstart, mend = match.span()
+                yield (rest[:mstart], '')
+                yield (rest[mstart:mend], 'grep.match')
+                rest = rest[mend:]
+
     matches = {}
     copies = {}
     def grepbody(fn, rev, body):
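The new __iter__ turns a matched line into (text, label) segments so that every occurrence on the line gets the 'grep.match' label, not just the first. A standalone rendition of the same logic (identical slicing, free function instead of a method):

    import re

    def segments(regexp, line, colstart, colend):
        # split a line into (text, label) pairs around every match
        yield (line[:colstart], '')
        yield (line[colstart:colend], 'grep.match')
        rest = line[colend:]
        while rest != '':
            m = regexp.search(rest)
            if not m:
                yield (rest, '')
                break
            mstart, mend = m.span()
            yield (rest[:mstart], '')
            yield (rest[mstart:mend], 'grep.match')
            rest = rest[mend:]

    rx = re.compile(r'foo')
    print(list(segments(rx, 'foo bar foo baz', 0, 3)))
    # [('', ''), ('foo', 'grep.match'), (' bar ', ''),
    #  ('foo', 'grep.match'), (' baz', '')]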
@@ -3232,7 +3335,7 @@ def grep(ui, repo, pattern, *pats, **opt
         rev = ctx.rev()
         datefunc = ui.quiet and util.shortdate or util.datestr
         found = False
-        filerevmatches = {}
+        @util.cachefunc
         def binary():
             flog = getfile(fn)
             return util.binary(flog.read(ctx.filenode(fn)))
@@ -3243,7 +3346,6 @@ def grep(ui, repo, pattern, *pats, **opt
             iter = [('', l) for l in states]
         for change, l in iter:
             cols = [(fn, 'grep.filename'), (str(rev), 'grep.rev')]
-            before, match, after = None, None, None

             if opts.get('line_number'):
                 cols.append((str(l.linenum), 'grep.linenumber'))
@@ -3253,29 +3355,21 @@ def grep(ui, repo, pattern, *pats, **opt
                 cols.append((ui.shortuser(ctx.user()), 'grep.user'))
             if opts.get('date'):
                 cols.append((datefunc(ctx.date()), 'grep.date'))
-            if opts.get('files_with_matches'):
-                c = (fn, rev)
-                if c in filerevmatches:
-                    continue
-                filerevmatches[c] = 1
-            else:
-                before = l.line[:l.colstart]
-                match = l.line[l.colstart:l.colend]
-                after = l.line[l.colend:]
             for col, label in cols[:-1]:
                 ui.write(col, label=label)
                 ui.write(sep, label='grep.sep')
             ui.write(cols[-1][0], label=cols[-1][1])
-            if before is not None:
+            if not opts.get('files_with_matches'):
                 ui.write(sep, label='grep.sep')
                 if not opts.get('text') and binary():
                     ui.write(" Binary file matches")
                 else:
-                    ui.write(before)
-                    ui.write(match, label='grep.match')
-                    ui.write(after)
+                    for s, label in l:
+                        ui.write(s, label=label)
             ui.write(eol)
             found = True
+            if opts.get('files_with_matches'):
+                break
         return found

     skip = {}
@@ -3671,10 +3765,6 @@ def import_(ui, repo, patch1=None, *patc
     if date:
         opts['date'] = util.parsedate(date)

-    editor = cmdutil.commiteditor
-    if opts.get('edit'):
-        editor = cmdutil.commitforceeditor
-
     update = not opts.get('bypass')
     if not update and opts.get('no_commit'):
         raise util.Abort(_('cannot use --no-commit with --bypass'))
@@ -3693,112 +3783,9 @@ def import_(ui, repo, patch1=None, *patc
     cmdutil.bailifchanged(repo)

     base = opts["base"]
-    strip = opts["strip"]
     wlock = lock = tr = None
     msgs = []

-    def tryone(ui, hunk, parents):
-        tmpname, message, user, date, branch, nodeid, p1, p2 = \
-            patch.extract(ui, hunk)
-
-        if not tmpname:
-            return (None, None)
-        msg = _('applied to working directory')
-
-        try:
-            cmdline_message = cmdutil.logmessage(ui, opts)
-            if cmdline_message:
-                # pickup the cmdline msg
-                message = cmdline_message
-            elif message:
-                # pickup the patch msg
-                message = message.strip()
-            else:
-                # launch the editor
-                message = None
-            ui.debug('message:\n%s\n' % message)
-
-            if len(parents) == 1:
-                parents.append(repo[nullid])
-            if opts.get('exact'):
-                if not nodeid or not p1:
-                    raise util.Abort(_('not a Mercurial patch'))
-                p1 = repo[p1]
-                p2 = repo[p2 or nullid]
-            elif p2:
-                try:
-                    p1 = repo[p1]
-                    p2 = repo[p2]
-                    # Without any options, consider p2 only if the
-                    # patch is being applied on top of the recorded
-                    # first parent.
-                    if p1 != parents[0]:
-                        p1 = parents[0]
-                        p2 = repo[nullid]
-                except error.RepoError:
-                    p1, p2 = parents
-            else:
-                p1, p2 = parents
-
-            n = None
-            if update:
-                if p1 != parents[0]:
-                    hg.clean(repo, p1.node())
-                if p2 != parents[1]:
-                    repo.setparents(p1.node(), p2.node())
-
-                if opts.get('exact') or opts.get('import_branch'):
-                    repo.dirstate.setbranch(branch or 'default')
-
-                files = set()
-                patch.patch(ui, repo, tmpname, strip=strip, files=files,
-                            eolmode=None, similarity=sim / 100.0)
-                files = list(files)
-                if opts.get('no_commit'):
-                    if message:
-                        msgs.append(message)
-                else:
-                    if opts.get('exact') or p2:
-                        # If you got here, you either use --force and know what
-                        # you are doing or used --exact or a merge patch while
-                        # being updated to its first parent.
-                        m = None
-                    else:
-                        m = scmutil.matchfiles(repo, files or [])
-                    n = repo.commit(message, opts.get('user') or user,
-                                    opts.get('date') or date, match=m,
-                                    editor=editor)
-            else:
-                if opts.get('exact') or opts.get('import_branch'):
-                    branch = branch or 'default'
-                else:
-                    branch = p1.branch()
-                store = patch.filestore()
-                try:
-                    files = set()
-                    try:
-                        patch.patchrepo(ui, repo, p1, store, tmpname, strip,
-                                        files, eolmode=None)
-                    except patch.PatchError, e:
-                        raise util.Abort(str(e))
-                    memctx = context.makememctx(repo, (p1.node(), p2.node()),
-                                                message,
-                                                opts.get('user') or user,
-                                                opts.get('date') or date,
-                                                branch, files, store,
-                                                editor=cmdutil.commiteditor)
-                    repo.savecommitmessage(memctx.description())
-                    n = memctx.commit()
-                finally:
-                    store.close()
-            if opts.get('exact') and hex(n) != nodeid:
-                raise util.Abort(_('patch is damaged or loses information'))
-            if n:
-                # i18n: refers to a short changeset id
-                msg = _('created %s') % short(n)
-            return (msg, n)
-        finally:
-            os.unlink(tmpname)

     try:
         try:
@@ -3819,7 +3806,8 @@ def import_(ui, repo, patch1=None, *patc

             haspatch = False
             for hunk in patch.split(patchfile):
-                (msg, node) = tryone(ui, hunk, parents)
+                (msg, node) = cmdutil.tryimportone(ui, repo, hunk, parents,
+                                                   opts, msgs, hg.clean)
                 if msg:
                     haspatch = True
                     ui.note(msg + '\n')
@@ -3870,6 +3858,23 @@ def incoming(ui, repo, source="default",

     See pull for valid source format details.

+    .. container:: verbose
+
+      Examples:
+
+      - show incoming changes with patches and full description::
+
+          hg incoming -vp
+
+      - show incoming changes excluding merges, store a bundle::
+
+          hg in -vpM --bundle incoming.hg
+          hg pull incoming.hg
+
+      - briefly list changes inside a bundle::
+
+          hg in changes.hg -T "{desc|firstline}\\n"
+
     Returns 0 if there are incoming changes, 1 otherwise.
     """
     if opts.get('graph'):
@@ -4004,6 +4009,12 @@ def log(ui, repo, *pats, **opts):
     each commit. When the -v/--verbose switch is used, the list of
     changed files and full commit message are shown.

+    With --graph the revisions are shown as an ASCII art DAG with the most
+    recent changeset at the top.
+    'o' is a changeset, '@' is a working directory parent, 'x' is obsolete,
+    and '+' represents a fork where the changeset from the lines below is a
+    parent of the 'o' merge on the same line.
+
     .. note::

        log -p/--patch may generate unexpected diff output for merge
@@ -4071,55 +4082,22 @@ def log(ui, repo, *pats, **opts):
     if opts.get('graph'):
         return cmdutil.graphlog(ui, repo, *pats, **opts)
 
-    matchfn = scmutil.match(repo[None], pats, opts)
+    revs, expr, filematcher = cmdutil.getlogrevs(repo, pats, opts)
     limit = cmdutil.loglimit(opts)
     count = 0
 
-    getrenamed, endrev = None, None
+    getrenamed = None
     if opts.get('copies'):
+        endrev = None
         if opts.get('rev'):
-            endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
+            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
         getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
 
-    df = False
-    if opts.get("date"):
-        df = util.matchdate(opts["date"])
-
-    branches = opts.get('branch', []) + opts.get('only_branch', [])
-    opts['branch'] = [repo.lookupbranch(b) for b in branches]
-
-    displayer = cmdutil.show_changeset(ui, repo, opts, True)
-    def prep(ctx, fns):
-        rev = ctx.rev()
-        parents = [p for p in repo.changelog.parentrevs(rev)
-                   if p != nullrev]
-        if opts.get('no_merges') and len(parents) == 2:
-            return
-        if opts.get('only_merges') and len(parents) != 2:
-            return
-        if opts.get('branch') and ctx.branch() not in opts['branch']:
-            return
-        if df and not df(ctx.date()[0]):
-            return
-
-        lower = encoding.lower
-        if opts.get('user'):
-            luser = lower(ctx.user())
-            for k in [lower(x) for x in opts['user']]:
-                if (k in luser):
-                    break
-            else:
-                return
-        if opts.get('keyword'):
-            luser = lower(ctx.user())
-            ldesc = lower(ctx.description())
-            lfiles = lower(" ".join(ctx.files()))
-            for k in [lower(x) for x in opts['keyword']]:
-                if (k in luser or k in ldesc or k in lfiles):
-                    break
-            else:
-                return
-
+    displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
+    for rev in revs:
+        if count == limit:
+            break
+        ctx = repo[rev]
         copies = None
         if getrenamed is not None and rev:
             copies = []
@@ -4127,22 +4105,11 @@ def log(ui, repo, *pats, **opts):
                 rename = getrenamed(fn, rev)
                 if rename:
                     copies.append((fn, rename[0]))
-
-            revmatchfn = None
-            if opts.get('patch') or opts.get('stat'):
-                if opts.get('follow') or opts.get('follow_first'):
-                    # note: this might be wrong when following through merges
-                    revmatchfn = scmutil.match(repo[None], fns, default='path')
-                else:
-                    revmatchfn = matchfn
-
+        revmatchfn = filematcher and filematcher(ctx.rev()) or None
         displayer.show(ctx, copies=copies, matchfn=revmatchfn)
-
-    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
-        if displayer.flush(ctx.rev()):
-            count += 1
-        if count == limit:
-            break
+        if displayer.flush(rev):
+            count += 1
+
     displayer.close()
 
 @command('manifest',
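Note: hg log is rewritten here from the walkchangerevs/prep callback above to a
plain loop over a revset computed by cmdutil.getlogrevs; the user/keyword/date/
branch filtering now happens in the revset machinery, and per-file matching comes
from the returned filematcher. A standalone sketch of the new display loop,
assuming any iterable of revisions:

    def display(revs, limit, emitted):
        # mirror of the count/limit loop: stop once `limit` revisions
        # were actually flushed by the displayer
        count = 0
        shown = []
        for rev in revs:
            if count == limit:
                break
            shown.append(rev)
            if emitted(rev):
                count += 1
        return shown

    assert display(range(100), 3, lambda rev: True) == [0, 1, 2]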
@@ -4319,10 +4286,10 @@ def merge(ui, repo, node=None, **opts):
 
     try:
         # ui.forcemerge is an internal variable, do not document
-        repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+        repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'merge')
         return hg.merge(repo, node, force=opts.get('force'))
     finally:
-        ui.setconfig('ui', 'forcemerge', '')
+        ui.setconfig('ui', 'forcemerge', '', 'merge')
 
 @command('outgoing|out',
     [('f', 'force', None, _('run even when the destination is unrelated')),
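Note: this hunk is the first of many below that pass a new source argument to
ui.setconfig ('merge', 'push', 'serve', '--config', ...), recording where each
override came from. A standalone sketch of the provenance bookkeeping (not the
real ui API; see also the config.py hunk further down):

    class MiniConfig(object):
        def __init__(self):
            self._data = {}
            self._source = {}

        def setconfig(self, section, item, value, source=''):
            self._data[(section, item)] = value
            if source:  # empty source keeps any earlier provenance
                self._source[(section, item)] = source

        def configsource(self, section, item):
            return self._source.get((section, item), '')

    cfg = MiniConfig()
    cfg.setconfig('ui', 'forcemerge', ':merge3', 'merge')
    cfg.setconfig('ui', 'forcemerge', '')  # reset value, keep source
    assert cfg.configsource('ui', 'forcemerge') == 'merge'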
@@ -4347,8 +4314,9 @@ def outgoing(ui, repo, dest=None, **opts
     """
     if opts.get('graph'):
         cmdutil.checkunsupportedgraphflags([], opts)
-        o = hg._outgoing(ui, repo, dest, opts)
-        if o is None:
+        o, other = hg._outgoing(ui, repo, dest, opts)
+        if not o:
+            cmdutil.outgoinghooks(ui, repo, other, opts, o)
             return
 
         revdag = cmdutil.graphrevs(repo, o, opts)
@@ -4356,6 +4324,7 @@ def outgoing(ui, repo, dest=None, **opts
         showparents = [ctx.node() for ctx in repo[None].parents()]
         cmdutil.displaygraph(ui, revdag, displayer, showparents,
                              graphmod.asciiedges)
+        cmdutil.outgoinghooks(ui, repo, other, opts, o)
         return 0
 
     if opts.get('bookmarks'):
@@ -4702,7 +4671,7 @@ def push(ui, repo, dest=None, **opts):
     """
 
     if opts.get('bookmark'):
-        ui.setconfig('bookmarks', 'pushing', opts['bookmark'])
+        ui.setconfig('bookmarks', 'pushing', opts['bookmark'], 'push')
         for b in opts['bookmark']:
             # translate -B options to -r so changesets get pushed
             if b in repo._bookmarks:
@@ -4716,7 +4685,15 @@ def push(ui, repo, dest=None, **opts):
     dest, branches = hg.parseurl(dest, opts.get('branch'))
     ui.status(_('pushing to %s\n') % util.hidepassword(dest))
     revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
-    other = hg.peer(repo, opts, dest)
+    try:
+        other = hg.peer(repo, opts, dest)
+    except error.RepoError:
+        if dest == "default-push":
+            raise util.Abort(_("default repository not configured!"),
+                             hint=_('see the "path" section in "hg help config"'))
+        else:
+            raise
+
     if revs:
         revs = [repo.lookup(r) for r in scmutil.revrange(repo, revs)]
 
@@ -4726,8 +4703,9 @@ def push(ui, repo, dest=None, **opts):
             c = repo['']
             subs = c.substate # only repos that are committed
             for s in sorted(subs):
-                if c.sub(s).push(opts) == 0:
-                    return False
+                result = c.sub(s).push(opts)
+                if result == 0:
+                    return not result
         finally:
             del repo._subtoppath
         result = repo.push(other, opts.get('force'), revs=revs,
@@ -4970,11 +4948,12 @@ def resolve(ui, repo, *pats, **opts):
 
                 try:
                     # resolve file
-                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
+                                 'resolve')
                     if ms.resolve(f, wctx):
                         ret = 1
                 finally:
-                    ui.setconfig('ui', 'forcemerge', '')
+                    ui.setconfig('ui', 'forcemerge', '', 'resolve')
                 ms.commit()
 
             # replace filemerge's .orig file with our resolve file
@@ -5177,7 +5156,6 @@ def serve(ui, repo, **opts):
         s.serve_forever()
 
     if opts["cmdserver"]:
-        checkrepo()
         s = commandserver.server(ui, repo, opts["cmdserver"])
         return s.serve()
 
@@ -5192,9 +5170,9 @@ def serve(ui, repo, **opts):
         val = opts.get(o, '')
         if val in (None, ''): # should check against default options instead
             continue
-        baseui.setconfig("web", o, val)
+        baseui.setconfig("web", o, val, 'serve')
         if repo and repo.ui != baseui:
-            repo.ui.setconfig("web", o, val)
+            repo.ui.setconfig("web", o, val, 'serve')
 
     o = opts.get('web_conf') or opts.get('webdir_conf')
     if not o:
@@ -5249,52 +5227,6 @@ class httpservice(object):
         self.httpd.serve_forever()
 
 
-@command('showconfig|debugconfig',
-    [('u', 'untrusted', None, _('show untrusted configuration options'))],
-    _('[-u] [NAME]...'))
-def showconfig(ui, repo, *values, **opts):
-    """show combined config settings from all hgrc files
-
-    With no arguments, print names and values of all config items.
-
-    With one argument of the form section.name, print just the value
-    of that config item.
-
-    With multiple arguments, print names and values of all config
-    items with matching section names.
-
-    With --debug, the source (filename and line number) is printed
-    for each config item.
-
-    Returns 0 on success.
-    """
-
-    for f in scmutil.rcpath():
-        ui.debug('read config from: %s\n' % f)
-    untrusted = bool(opts.get('untrusted'))
-    if values:
-        sections = [v for v in values if '.' not in v]
-        items = [v for v in values if '.' in v]
-        if len(items) > 1 or items and sections:
-            raise util.Abort(_('only one config item permitted'))
-    for section, name, value in ui.walkconfig(untrusted=untrusted):
-        value = str(value).replace('\n', '\\n')
-        sectname = section + '.' + name
-        if values:
-            for v in values:
-                if v == section:
-                    ui.debug('%s: ' %
-                             ui.configsource(section, name, untrusted))
-                    ui.write('%s=%s\n' % (sectname, value))
-                elif v == sectname:
-                    ui.debug('%s: ' %
-                             ui.configsource(section, name, untrusted))
-                    ui.write(value, '\n')
-        else:
-            ui.debug('%s: ' %
-                     ui.configsource(section, name, untrusted))
-            ui.write('%s=%s\n' % (sectname, value))
-
 @command('^status|st',
     [('A', 'all', None, _('show status of all files')),
     ('m', 'modified', None, _('show only modified files')),
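Note: the showconfig implementation deleted above is apparently superseded rather
than removed outright; the optionalrepo hunk at the end of this file adds a
'config' command alongside showconfig (its new definition falls outside this
excerpt). The core of the deleted body is a filtered walk over flattened
section.name keys; a standalone sketch:

    def walkconfig(data, values=None):
        # data: {(section, name): value}; values: optional 'section' or
        # 'section.name' filters, as in the deleted command body
        for (section, name), value in sorted(data.items()):
            sectname = '%s.%s' % (section, name)
            if not values or section in values or sectname in values:
                yield '%s=%s' % (sectname, str(value).replace('\n', '\\n'))

    data = {('ui', 'username'): 'alice', ('web', 'port'): 8000}
    assert list(walkconfig(data, ['ui'])) == ['ui.username=alice']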
@@ -5345,7 +5277,7 @@ def status(ui, repo, *pats, **opts):
       ! = missing (deleted by non-hg command, but still tracked)
       ? = not tracked
       I = ignored
-        = origin of the previous file listed as A (added)
+        = origin of the previous file (with --copies)
 
     .. container:: verbose
 
@@ -5553,38 +5485,82 @@ def summary(ui, repo, **opts):
     cmdutil.summaryhooks(ui, repo)
 
     if opts.get('remote'):
-        t = []
+        needsincoming, needsoutgoing = True, True
+    else:
+        needsincoming, needsoutgoing = False, False
+        for i, o in cmdutil.summaryremotehooks(ui, repo, opts, None):
+            if i:
+                needsincoming = True
+            if o:
+                needsoutgoing = True
+        if not needsincoming and not needsoutgoing:
+            return
+
+    def getincoming():
         source, branches = hg.parseurl(ui.expandpath('default'))
         sbranch = branches[0]
-        other = hg.peer(repo, {}, source)
+        try:
+            other = hg.peer(repo, {}, source)
+        except error.RepoError:
+            if opts.get('remote'):
+                raise
+            return source, sbranch, None, None, None
         revs, checkout = hg.addbranchrevs(repo, other, branches, None)
         if revs:
            revs = [other.lookup(rev) for rev in revs]
         ui.debug('comparing with %s\n' % util.hidepassword(source))
         repo.ui.pushbuffer()
         commoninc = discovery.findcommonincoming(repo, other, heads=revs)
-        _common, incoming, _rheads = commoninc
         repo.ui.popbuffer()
-        if incoming:
-            t.append(_('1 or more incoming'))
+        return source, sbranch, other, commoninc, commoninc[1]
 
+    if needsincoming:
+        source, sbranch, sother, commoninc, incoming = getincoming()
+    else:
+        source = sbranch = sother = commoninc = incoming = None
+
+    def getoutgoing():
         dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
         dbranch = branches[0]
         revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
         if source != dest:
-            other = hg.peer(repo, {}, dest)
+            try:
+                dother = hg.peer(repo, {}, dest)
+            except error.RepoError:
+                if opts.get('remote'):
+                    raise
+                return dest, dbranch, None, None
             ui.debug('comparing with %s\n' % util.hidepassword(dest))
+        elif sother is None:
+            # there is no explicit destination peer, but source one is invalid
+            return dest, dbranch, None, None
+        else:
+            dother = sother
         if (source != dest or (sbranch is not None and sbranch != dbranch)):
-            commoninc = None
+            common = None
+        else:
+            common = commoninc
         if revs:
             revs = [repo.lookup(rev) for rev in revs]
         repo.ui.pushbuffer()
-        outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs,
-                                                commoninc=commoninc)
+        outgoing = discovery.findcommonoutgoing(repo, dother, onlyheads=revs,
+                                                commoninc=common)
         repo.ui.popbuffer()
+        return dest, dbranch, dother, outgoing
+
+    if needsoutgoing:
+        dest, dbranch, dother, outgoing = getoutgoing()
+    else:
+        dest = dbranch = dother = outgoing = None
+
+    if opts.get('remote'):
+        t = []
+        if incoming:
+            t.append(_('1 or more incoming'))
         o = outgoing.missing
         if o:
             t.append(_('%d outgoing') % len(o))
+        other = dother or sother
         if 'bookmarks' in other.listkeys('namespaces'):
             lmarks = repo.listkeys('bookmarks')
             rmarks = other.listkeys('bookmarks')
5602 | # i18n: column positioning for "hg summary" |
|
5578 | # i18n: column positioning for "hg summary" | |
5603 | ui.status(_('remote: (synced)\n')) |
|
5579 | ui.status(_('remote: (synced)\n')) | |
5604 |
|
5580 | |||
|
5581 | cmdutil.summaryremotehooks(ui, repo, opts, | |||
|
5582 | ((source, sbranch, sother, commoninc), | |||
|
5583 | (dest, dbranch, dother, outgoing))) | |||
|
5584 | ||||
5605 | @command('tag', |
|
5585 | @command('tag', | |
5606 | [('f', 'force', None, _('force tag')), |
|
5586 | [('f', 'force', None, _('force tag')), | |
5607 | ('l', 'local', None, _('make the tag local')), |
|
5587 | ('l', 'local', None, _('make the tag local')), | |
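Note: summary now decides up front whether remote data is needed at all: --remote
forces both directions, otherwise summaryremotehooks(ui, repo, opts, None) is
polled and the function bails out before any peer is contacted when no hook wants
incoming or outgoing data. A standalone sketch of that gating, with stand-in
expensive functions:

    def remote_summary(want_remote, hook_needs, fetch_in, fetch_out):
        # hook_needs: iterable of (needs_incoming, needs_outgoing) pairs
        if want_remote:
            needsin = needsout = True
        else:
            needsin = needsout = False
            for i, o in hook_needs:
                needsin = needsin or i
                needsout = needsout or o
            if not (needsin or needsout):
                return None  # skip peer setup entirely
        return (fetch_in() if needsin else None,
                fetch_out() if needsout else None)

    assert remote_summary(False, [], None, None) is None
    assert remote_summary(False, [(True, False)],
                          lambda: 'in', lambda: 'out') == ('in', None)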
@@ -5788,8 +5768,9 @@ def unbundle(ui, repo, fname1, *fnames,
     try:
         for fname in fnames:
             f = hg.openpath(ui, fname)
-            gen = changegroup.readbundle(f, fname)
-            modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
+            gen = exchange.readbundle(ui, f, fname)
+            modheads = changegroup.addchangegroup(repo, gen, 'unbundle',
+                                                  'bundle:' + fname)
     finally:
         lock.release()
     bookmarks.updatecurrentbookmark(repo, wc.node(), wc.branch())
@@ -5933,7 +5914,7 @@ def version_(ui):
 norepo = ("clone init version help debugcommands debugcomplete"
           " debugdate debuginstall debugfsinfo debugpushkey debugwireargs"
           " debugknown debuggetbundle debugbundle")
-optionalrepo = ("identify paths serve showconfig debugancestor debugdag"
+optionalrepo = ("identify paths serve config showconfig debugancestor debugdag"
                 " debugdata debugindex debugindexdot debugrevlog")
 inferrepo = ("add addremove annotate cat commit diff grep forget log parents"
              " remove resolve status debugwalk")
@@ -142,11 +142,15 @@ class server(object):
         else:
             logfile = open(logpath, 'a')
 
-        # the ui here is really the repo ui so take its baseui so we don't end
-        # up with its local configuration
-        self.ui = repo.baseui
-        self.repo = repo
-        self.repoui = repo.ui
+        if repo:
+            # the ui here is really the repo ui so take its baseui so we don't
+            # end up with its local configuration
+            self.ui = repo.baseui
+            self.repo = repo
+            self.repoui = repo.ui
+        else:
+            self.ui = ui
+            self.repo = self.repoui = None
 
         if mode == 'pipe':
             self.cerr = channeledoutput(sys.stderr, sys.stdout, 'e')
@@ -183,18 +187,18 @@ class server(object):
         # copy the uis so changes (e.g. --config or --verbose) don't
         # persist between requests
         copiedui = self.ui.copy()
-        self.repo.baseui = copiedui
-        # clone ui without using ui.copy because this is protected
-        repoui = self.repoui.__class__(self.repoui)
-        repoui.copy = copiedui.copy # redo copy protection
-        self.repo.ui = self.repo.dirstate._ui = repoui
-        self.repo.invalidate()
-        self.repo.invalidatedirstate()
+        if self.repo:
+            self.repo.baseui = copiedui
+            # clone ui without using ui.copy because this is protected
+            repoui = self.repoui.__class__(self.repoui)
+            repoui.copy = copiedui.copy # redo copy protection
+            self.repo.ui = self.repo.dirstate._ui = repoui
+            self.repo.invalidateall()
 
         req = dispatch.request(args[:], copiedui, self.repo, self.cin,
                                self.cout, self.cerr)
 
-        ret = dispatch.dispatch(req) or 0 # might return None
+        ret = (dispatch.dispatch(req) or 0) & 255 # might return None
 
         # restore old cwd
         if '--cwd' in args:
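Note: two independent commandserver fixes are visible here: the server now
tolerates repo being None (server started outside a repository), and the dispatch
result is masked with & 255 so it fits in the single status byte the protocol
writes, matching POSIX exit-status semantics. A standalone sketch of the masking:

    def exitcode(ret):
        # dispatch may return None or any int; the result channel carries
        # one byte, so normalize the same way the hunk above does
        return (ret or 0) & 255

    assert exitcode(None) == 0
    assert exitcode(1) == 1
    assert exitcode(-1) == 255
    assert exitcode(256) == 0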
@@ -93,7 +93,8 @@ class config(object):
         if section not in self:
             self._data[section] = sortdict()
         self._data[section][item] = value
-        self._source[(section, item)] = source
+        if source:
+            self._source[(section, item)] = source
 
     def restore(self, data):
         """restore data returned by self.backup"""
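Note: with setconfig calls now passing sources from many sites, config.set must
not let an empty source wipe out a previously recorded one; the new guard only
overwrites provenance when a non-empty source is given. Standalone sketch of both
behaviours:

    sources = {}

    def record(key, src):
        if src:  # same guard as the hunk above
            sources[key] = src

    record('ui.verbose', '--verbose')
    record('ui.verbose', '')  # unsourced update keeps the old origin
    assert sources['ui.verbose'] == '--verbose'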
@@ -7,11 +7,12 @@
 
 from node import nullid, nullrev, short, hex, bin
 from i18n import _
-import ancestor, mdiff, error, util, scmutil, subrepo, patch, encoding, phases
+import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
 import match as matchmod
 import os, errno, stat
 import obsolete as obsmod
 import repoview
+import fileset
 
 propertycache = util.propertycache
 
@@ -79,6 +80,9 @@ class basectx(object):
     def mutable(self):
         return self.phase() > phases.public
 
+    def getfileset(self, expr):
+        return fileset.getfileset(self, expr)
+
     def obsolete(self):
         """True if the changeset is obsolete"""
         return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
@@ -392,14 +396,32 @@ class changectx(basectx):
 
     def ancestor(self, c2):
         """
-        return the ancestor context of self and c2
+        return the "best" ancestor context of self and c2
         """
         # deal with workingctxs
         n2 = c2._node
         if n2 is None:
             n2 = c2._parents[0]._node
-        n = self._repo.changelog.ancestor(self._node, n2)
-        return changectx(self._repo, n)
+        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
+        if not cahs:
+            anc = nullid
+        elif len(cahs) == 1:
+            anc = cahs[0]
+        else:
+            for r in self._repo.ui.configlist('merge', 'preferancestor'):
+                ctx = changectx(self._repo, r)
+                anc = ctx.node()
+                if anc in cahs:
+                    break
+            else:
+                anc = self._repo.changelog.ancestor(self._node, n2)
+                self._repo.ui.status(
+                    (_("note: using %s as ancestor of %s and %s\n") %
+                     (short(anc), short(self._node), short(n2))) +
+                    ''.join(_("      alternatively, use --config "
+                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
+        return changectx(self._repo, anc)
 
     def descendant(self, other):
         """True if other is descendant of this changeset"""
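Note: changectx.ancestor now asks the changelog for all common ancestor heads
and, when there are several, lets merge.preferancestor pick one before falling
back to the classic single answer (with a note telling the user about the
alternatives). The for/else selection is the subtle part; a standalone sketch:

    def pick_ancestor(candidates, preferred, fallback):
        # candidates: commonancestorsheads() result (assumed non-empty);
        # preferred: merge.preferancestor list; fallback: classic answer
        if len(candidates) == 1:
            return candidates[0]
        for anc in preferred:
            if anc in candidates:
                break            # first preferred hit wins
        else:
            anc = fallback       # for/else: only when no break happened
        return anc

    assert pick_ancestor(['a'], [], 'z') == 'a'
    assert pick_ancestor(['a', 'b'], ['b', 'a'], 'a') == 'b'
    assert pick_ancestor(['a', 'b'], ['c'], 'a') == 'a'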
@@ -429,8 +451,7 @@ class changectx(basectx):
             if fn in self._dirs:
                 # specified pattern is a directory
                 continue
-            if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
-                yield fn
+            match.bad(fn, _('no such file in rev %s') % self)
 
 class basefilectx(object):
     """A filecontext object represents the common logic for its children:
@@ -684,55 +705,6 @@ class basefilectx(object):
 
         return zip(hist[base][0], hist[base][1].splitlines(True))
 
-    def ancestor(self, fc2, actx):
-        """
-        find the common ancestor file context, if any, of self, and fc2
-
-        actx must be the changectx of the common ancestor
-        of self's and fc2's respective changesets.
-        """
-
-        # the easy case: no (relevant) renames
-        if fc2.path() == self.path() and self.path() in actx:
-            return actx[self.path()]
-
-        # the next easiest cases: unambiguous predecessor (name trumps
-        # history)
-        if self.path() in actx and fc2.path() not in actx:
-            return actx[self.path()]
-        if fc2.path() in actx and self.path() not in actx:
-            return actx[fc2.path()]
-
-        # prime the ancestor cache for the working directory
-        acache = {}
-        for c in (self, fc2):
-            if c.filenode() is None:
-                pl = [(n.path(), n.filenode()) for n in c.parents()]
-                acache[(c._path, None)] = pl
-
-        flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
-        def parents(vertex):
-            if vertex in acache:
-                return acache[vertex]
-            f, n = vertex
-            if f not in flcache:
-                flcache[f] = self._repo.file(f)
-            fl = flcache[f]
-            pl = [(f, p) for p in fl.parents(n) if p != nullid]
-            re = fl.renamed(n)
-            if re:
-                pl.append(re)
-            acache[vertex] = pl
-            return pl
-
-        a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
-        v = ancestor.genericancestor(a, b, parents)
-        if v:
-            f, n = v
-            return filectx(self._repo, f, fileid=n, filelog=flcache[f])
-
-        return None
-
     def ancestors(self, followfirst=False):
         visit = {}
         c = self
@@ -228,9 +228,6 @@ def mergecopies(repo, c1, c2, ca):
     fullcopy = {}
     diverge = {}
 
-    def _checkcopies(f, m1, m2):
-        checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy)
-
     repo.ui.debug("  searching for copies back to rev %d\n" % limit)
 
     u1 = _nonoverlap(m1, m2, ma)
@@ -244,9 +241,10 @@ def mergecopies(repo, c1, c2, ca):
               % "\n   ".join(u2))
 
     for f in u1:
-        _checkcopies(f, m1, m2)
+        checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy)
+
     for f in u2:
-        _checkcopies(f, m2, m1)
+        checkcopies(ctx, f, m2, m1, ca, limit, diverge, copy, fullcopy)
 
     renamedelete = {}
     renamedelete2 = set()
@@ -262,7 +260,19 @@ def mergecopies(repo, c1, c2, ca):
         else:
             diverge2.update(fl) # reverse map for below
 
-    if fullcopy:
+    bothnew = sorted([d for d in m1 if d in m2 and d not in ma])
+    if bothnew:
+        repo.ui.debug("  unmatched files new in both:\n   %s\n"
+                      % "\n   ".join(bothnew))
+    bothdiverge, _copy, _fullcopy = {}, {}, {}
+    for f in bothnew:
+        checkcopies(ctx, f, m1, m2, ca, limit, bothdiverge, _copy, _fullcopy)
+        checkcopies(ctx, f, m2, m1, ca, limit, bothdiverge, _copy, _fullcopy)
+    for of, fl in bothdiverge.items():
+        if len(fl) == 2 and fl[0] == fl[1]:
+            copy[fl[0]] = of # not actually divergent, just matching renames
+
+    if fullcopy and repo.ui.debugflag:
         repo.ui.debug("  all copies found (* = to merge, ! = divergent, "
                       "% = renamed and deleted):\n")
         for f in sorted(fullcopy):
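Note: mergecopies gains a pass over files added on both sides: each is traced
from both manifests into a shared bothdiverge map, and a source whose two traces
landed on the same destination is reclassified from divergence to a plain
matching rename. Standalone sketch of that post-processing:

    def match_renames(bothdiverge):
        # bothdiverge: {source: [dest, ...]} from tracing both sides
        copy = {}
        for of, fl in bothdiverge.items():
            if len(fl) == 2 and fl[0] == fl[1]:
                copy[fl[0]] = of  # matching renames, not divergent
        return copy

    assert match_renames({'old': ['new', 'new']}) == {'new': 'old'}
    assert match_renames({'old': ['x', 'y']}) == {}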
@@ -24,7 +24,7 @@ These imports will not be delayed:
   b = __import__(a)
 '''
 
-import __builtin__
+import __builtin__, os
 _origimport = __import__
 
 nothing = object()
@@ -167,7 +167,8 @@ def isenabled():
 
 def enable():
     "enable global demand-loading of modules"
-    __builtin__.__import__ = _demandimport
+    if os.environ.get('HGDEMANDIMPORT') != 'disable':
+        __builtin__.__import__ = _demandimport
 
 def disable():
     "disable global demand-loading of modules"
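Note: enable() now honours an HGDEMANDIMPORT=disable environment variable, giving
users a way to switch off lazy importing when it misbehaves with a particular
module. Standalone sketch of the gate:

    import os

    def demandimport_wanted():
        # same escape hatch as the hunk above
        return os.environ.get('HGDEMANDIMPORT') != 'disable'

    os.environ['HGDEMANDIMPORT'] = 'disable'
    assert not demandimport_wanted()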
@@ -4,7 +4,6 @@
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
-import errno
 
 from node import nullid
 from i18n import _
@@ -504,17 +503,13 @@ class dirstate(object):
         if not self._dirty:
             return
         st = self._opener("dirstate", "w", atomictemp=True)
-
-        def finish(s):
-            st.write(s)
-            st.close()
-            self._lastnormaltime = 0
-            self._dirty = self._dirtypl = False
-
         # use the modification time of the newly created temporary file as the
         # filesystem's notion of 'now'
         now = util.fstat(st).st_mtime
-        finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
+        st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
+        st.close()
+        self._lastnormaltime = 0
+        self._dirty = self._dirtypl = False
 
     def _dirignore(self, f):
         if f == '.':
@@ -600,7 +595,7 @@ class dirstate(object):
                 kind = getkind(st.st_mode)
                 if kind == dirkind:
                     if nf in dmap:
-                        #file deleted on disk but still in dirstate
+                        # file replaced by dir on disk but still in dirstate
                         results[nf] = None
                         if matchedir:
                             matchedir(nf)
@@ -611,10 +606,10 @@ class dirstate(object):
                     badfn(ff, badtype(kind))
                     if nf in dmap:
                         results[nf] = None
-            except OSError, inst:
-                if nf in dmap: # does it exactly match a file?
+            except OSError, inst: # nf not found on disk - it is dirstate only
+                if nf in dmap: # does it exactly match a missing file?
                     results[nf] = None
-                else: # does it match a directory?
+                else: # does it match a missing directory?
                     prefix = nf + "/"
                     for fn in dmap:
                         if fn.startswith(prefix):
@@ -642,17 +637,14 @@ class dirstate(object):
         # implementation doesn't use it at all. This satisfies the contract
         # because we only guarantee a "maybe".
 
-        def fwarn(f, msg):
-            self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
-            return False
-
-        ignore = self._ignore
-        dirignore = self._dirignore
         if ignored:
             ignore = util.never
             dirignore = util.never
-        elif not unknown:
-            # if unknown and ignored are False, skip step 2
+        elif unknown:
+            ignore = self._ignore
+            dirignore = self._dirignore
+        else:
+            # if not unknown and not ignored, drop dir recursion and step 2
             ignore = util.always
             dirignore = util.always
 
@@ -699,7 +691,7 @@ class dirstate(object):
                     entries = listdir(join(nd), stat=True, skip=skip)
                 except OSError, inst:
                     if inst.errno in (errno.EACCES, errno.ENOENT):
-                        fwarn(nd, inst.strerror)
+                        match.bad(self.pathto(nd), inst.strerror)
                         continue
                     raise
                 for f, kind, st in entries:
@@ -728,8 +720,11 @@ class dirstate(object):
                 del results[s]
         del results['.hg']
 
-        # step 3: report unseen items in the dmap hash
+        # step 3: visit remaining files from dmap
         if not skipstep3 and not exact:
+            # If a dmap file is not in results yet, it was either
+            # a) not matching matchfn b) ignored, c) missing, or d) under a
+            # symlink directory.
             if not results and matchalways:
                 visit = dmap.keys()
             else:
@@ -737,9 +732,10 @@ class dirstate(object):
                 visit.sort()
 
             if unknown:
-                # unknown == True means we walked the full directory tree above.
-                # So if a file is not seen it was either a) not matching matchfn
-                # b) ignored, c) missing, or d) under a symlink directory.
+                # unknown == True means we walked all dirs under the roots
+                # that wasn't ignored, and everything that matched was stat'ed
+                # and is already in results.
+                # The rest must thus be ignored or under a symlink.
                 audit_path = pathutil.pathauditor(self._root)
 
                 for nf in iter(visit):
@@ -748,15 +744,17 @@ class dirstate(object):
                     if audit_path.check(nf):
                         try:
                             results[nf] = lstat(join(nf))
+                            # file was just ignored, no links, and exists
                         except OSError:
                             # file doesn't exist
                             results[nf] = None
                     else:
                         # It's either missing or under a symlink directory
+                        # which we in this case report as missing
                         results[nf] = None
                 else:
                     # We may not have walked the full directory tree above,
-                    # so stat everything we missed.
+                    # so stat and check everything we missed.
                     nf = iter(visit).next
                     for st in util.statfiles([join(i) for i in visit]):
                         results[nf()] = st
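Note: the walk() cleanups above replace the local fwarn helper with match.bad and
restate the ignore/dirignore choice as an explicit three-way switch on
(ignored, unknown); the new else branch skips directory recursion and step 2
entirely when neither ignored nor unknown files were requested. Standalone sketch
of the selection table:

    def pick_filters(ignored, unknown, real_ignore, real_dirignore):
        never = lambda f: False   # ignore nothing: report everything
        always = lambda f: True   # ignore everything: skip the walk
        if ignored:
            return never, never
        elif unknown:
            return real_ignore, real_dirignore
        # neither unknown nor ignored: drop dir recursion and step 2
        return always, always

    ignore, dirignore = pick_filters(False, False, None, None)
    assert ignore('f') and dirignore('d')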
@@ -154,7 +154,7 @@ def _headssummary(repo, remote, outgoing
 
     - branch: the branch name
     - remoteheads: the list of remote heads known locally
-                   None is the branch is new
+                   None if the branch is new
     - newheads: the new remote heads (known locally) with outgoing pushed
     - unsyncedheads: the list of remote heads unknown locally.
     """
@@ -250,8 +250,7 @@ def checkheads(repo, remote, outgoing, r
                              hint=_("use 'hg push --new-branch' to create"
                                     " new remote branches"))
 
-    # 2 compute newly pushed bookmarks. We
-    # we don't warned about bookmarked heads.
+    # 2. Compute newly pushed bookmarks. We don't warn about bookmarked heads.
     localbookmarks = repo._bookmarks
     remotebookmarks = remote.listkeys('bookmarks')
     bookmarkedheads = set()
@@ -269,23 +268,23 @@ def checkheads(repo, remote, outgoing, r
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     error = None
-    unsynced = False
     allmissing = set(outgoing.missing)
     allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
     allfuturecommon.update(allmissing)
     for branch, heads in sorted(headssum.iteritems()):
-        candidate_newhs = set(heads[1])
+        remoteheads, newheads, unsyncedheads = heads
+        candidate_newhs = set(newheads)
         # add unsynced data
-        if heads[0] is None:
+        if remoteheads is None:
             oldhs = set()
         else:
-            oldhs = set(heads[0])
-        oldhs.update(heads[2])
-        candidate_newhs.update(heads[2])
-        dhs = None
+            oldhs = set(remoteheads)
+        oldhs.update(unsyncedheads)
+        candidate_newhs.update(unsyncedheads)
+        dhs = None # delta heads, the new heads on branch
         discardedheads = set()
         if repo.obsstore:
-            # remove future heads which are actually obsolete by another
+            # remove future heads which are actually obsoleted by another
             # pushed element:
             #
             # XXX as above, There are several cases this case does not handle
@@ -297,8 +296,8 @@ def checkheads(repo, remote, outgoing, r
             # (2) if the new heads have ancestors which are not obsolete and
             #     not ancestors of any other heads we will have a new head too.
             #
-            # This two case will be easy to handle for known changeset but much
-            # more tricky for unsynced changes.
+            # These two cases will be easy to handle for known changeset but
+            # much more tricky for unsynced changes.
             newhs = set()
             for nh in candidate_newhs:
                 if nh in repo and repo[nh].phase() <= phases.public:
@@ -312,10 +311,17 @@ def checkheads(repo, remote, outgoing, r
                     newhs.add(nh)
         else:
             newhs = candidate_newhs
-        if [h for h in heads[2] if h not in discardedheads]:
-            unsynced = True
-        if heads[0] is None:
-            if 1 < len(newhs):
+        unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
+        if unsynced:
+            heads = ' '.join(short(h) for h in unsynced)
+            if branch is None:
+                repo.ui.status(_("remote has heads that are "
+                                 "not known locally: %s\n") % heads)
+            else:
+                repo.ui.status(_("remote has heads on branch '%s' that are "
+                                 "not known locally: %s\n") % (branch, heads))
+        if remoteheads is None:
+            if len(newhs) > 1:
                 dhs = list(newhs)
                 if error is None:
                     error = (_("push creates new branch '%s' "
@@ -324,7 +330,7 @@ def checkheads(repo, remote, outgoing, r
                              " see \"hg help push\" for details about"
                              " pushing new heads")
         elif len(newhs) > len(oldhs):
-            # strip updates to existing remote heads from the new heads list
+            # remove bookmarked or existing remote heads from the new heads list
             dhs = sorted(newhs - bookmarkedheads - oldhs)
         if dhs:
             if error is None:
@@ -334,7 +340,7 @@ def checkheads(repo, remote, outgoing, r
             else:
                 error = _("push creates new remote head %s!"
                          ) % short(dhs[0])
-            if heads[2]: # unsynced
+            if unsyncedheads:
                 hint = _("pull and merge or"
                          " see \"hg help push\" for details about"
                          " pushing new heads")
@@ -350,7 +356,3 @@ def checkheads(repo, remote, outgoing, r
             repo.ui.note((" %s\n") % short(h))
         if error:
             raise util.Abort(error, hint=hint)
-
-    # 6. Check for unsynced changes on involved branches.
-    if unsynced:
-        repo.ui.warn(_("note: unsynced remote changes!\n"))
@@ -40,7 +40,7 @@ def dispatch(req):
     if not req.ui:
         req.ui = uimod.ui()
     if '--traceback' in req.args:
-        req.ui.setconfig('ui', 'traceback', 'on')
+        req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
 
     # set ui streams from the request
     if req.fin:
@@ -103,8 +103,8 @@ def _runcatch(req):
         if req.repo:
             # copy configs that were passed on the cmdline (--config) to
             # the repo ui
-            for cfg in cfgs:
-                req.repo.ui.setconfig(*cfg)
+            for sec, name, val in cfgs:
+                req.repo.ui.setconfig(sec, name, val, source='--config')
 
         # if we are in HGPLAIN mode, then disable custom debugging
         debugger = ui.config("ui", "debugger")
@@ -522,7 +522,7 @@ def _parseconfig(ui, config):
             section, name = name.split('.', 1)
             if not section or not name:
                 raise IndexError
-            ui.setconfig(section, name, value)
+            ui.setconfig(section, name, value, '--config')
             configs.append((section, name, value))
         except (IndexError, ValueError):
             raise util.Abort(_('malformed --config option: %r '
@@ -739,19 +739,19 @@ def _dispatch(req):
     for opt in ('verbose', 'debug', 'quiet'):
         val = str(bool(options[opt]))
         for ui_ in uis:
-            ui_.setconfig('ui', opt, val)
+            ui_.setconfig('ui', opt, val, '--' + opt)
 
     if options['traceback']:
         for ui_ in uis:
-            ui_.setconfig('ui', 'traceback', 'on')
+            ui_.setconfig('ui', 'traceback', 'on', '--traceback')
 
     if options['noninteractive']:
         for ui_ in uis:
-            ui_.setconfig('ui', 'interactive', 'off')
+            ui_.setconfig('ui', 'interactive', 'off', '-y')
 
     if cmdoptions.get('insecure', False):
         for ui_ in uis:
-            ui_.setconfig('web', 'cacerts', '')
+            ui_.setconfig('web', 'cacerts', '', '--insecure')
 
     if options['version']:
         return commands.version_(ui)
@@ -777,7 +777,7 @@ def _dispatch(req):
             repo = hg.repository(ui, path=path)
             if not repo.local():
                 raise util.Abort(_("repository '%s' is not local") % path)
-            repo.ui.setconfig("bundle", "mainreporoot", repo.root)
+            repo.ui.setconfig("bundle", "mainreporoot", repo.root, 'repo')
         except error.RequirementError:
             raise
         except error.RepoError:
@@ -11,7 +11,7 @@ from i18n import _, gettext
 
 _extensions = {}
 _order = []
-_ignore = ['hbisect', 'bookmarks', 'parentrevspec', 'interhg']
+_ignore = ['hbisect', 'bookmarks', 'parentrevspec', 'interhg', 'inotify']
 
 def extensions(ui=None):
     if ui:
@@ -43,10 +43,10 @@ def find(name):
 
 def loadpath(path, module_name):
     module_name = module_name.replace('.', '_')
-    path = util.expandpath(path)
+    path = util.normpath(util.expandpath(path))
     if os.path.isdir(path):
         # module/__init__.py style
-        d, f = os.path.split(path.rstrip('/'))
+        d, f = os.path.split(path)
         fd, fpath, desc = imp.find_module(f, [d])
         return imp.load_module(module_name, fd, fpath, desc)
     else:
@@ -248,20 +248,21 @@ def _xmerge(repo, mynode, orig, fcd, fco
     tool, toolpath, binary, symlink = toolconf
     a, b, c, back = files
     out = ""
-    env = dict(HG_FILE=fcd.path(),
-               HG_MY_NODE=short(mynode),
-               HG_OTHER_NODE=str(fco.changectx()),
-               HG_BASE_NODE=str(fca.changectx()),
-               HG_MY_ISLINK='l' in fcd.flags(),
-               HG_OTHER_ISLINK='l' in fco.flags(),
-               HG_BASE_ISLINK='l' in fca.flags())
+    env = {'HG_FILE': fcd.path(),
+           'HG_MY_NODE': short(mynode),
+           'HG_OTHER_NODE': str(fco.changectx()),
+           'HG_BASE_NODE': str(fca.changectx()),
+           'HG_MY_ISLINK': 'l' in fcd.flags(),
+           'HG_OTHER_ISLINK': 'l' in fco.flags(),
+           'HG_BASE_ISLINK': 'l' in fca.flags(),
+           }
 
     ui = repo.ui
 
     args = _toolstr(ui, tool, "args", '$local $base $other')
     if "$output" in args:
         out, a = a, back # read input from backup, write to original
-    replace = dict(local=a, base=b, other=c, output=out)
+    replace = {'local': a, 'base': b, 'other': c, 'output': out}
     args = util.interpolate(r'\$', replace, args,
                             lambda s: util.shellquote(util.localpath(s)))
     r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
@@ -333,10 +334,10 @@ def filemerge(repo, mynode, orig, fcd, f
             if onfailure:
                 ui.warn(onfailure % fd)
         else:
-            os.unlink(back)
+            util.unlink(back)
 
-        os.unlink(b)
-        os.unlink(c)
+        util.unlink(b)
+        util.unlink(c)
         return r
 
     if not r and (_toolbool(ui, tool, "checkconflicts") or
@@ -367,10 +368,10 @@ def filemerge(repo, mynode, orig, fcd, f
             if onfailure:
                 ui.warn(onfailure % fd)
         else:
-            os.unlink(back)
+            util.unlink(back)
 
-        os.unlink(b)
-        os.unlink(c)
+        util.unlink(b)
+        util.unlink(c)
         return r
 
 # tell hggettext to extract docstrings from these functions:
@@ -34,10 +34,10 @@ def dagwalker(repo, revs):
         return

     cl = repo.changelog
-    lowestrev = min(revs)
+    lowestrev = revs.min()
     gpcache = {}

-    knownrevs = set(revs)
+    knownrevs = revs.set()
     for rev in revs:
         ctx = repo[rev]
         parents = sorted(set([p.rev() for p in ctx.parents()
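The `revs.min()`/`revs.set()` calls assume `revs` is now a smartset-style object rather than a plain list, so implementations can stay lazy instead of materializing every revision. A rough stand-in for the interface `dagwalker` relies on (not Mercurial's actual baseset class):

    class baseset(list):
        # list subclass that answers min()/set() itself, like revset basesets
        def min(self):
            return min(iter(self))
        def set(self):
            return set(self)

    revs = baseset([5, 3, 8])
    print(revs.min())          # 3
    print(sorted(revs.set()))  # [3, 5, 8]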
@@ -12,18 +12,21 @@ import extensions, revset, fileset, temp
 import encoding, util, minirst
 import cmdutil

-def listexts(header, exts, indent=1):
+def listexts(header, exts, indent=1, showdeprecated=False):
     '''return a text listing of the given extensions'''
     rst = []
     if exts:
         rst.append('\n%s\n\n' % header)
         for name, desc in sorted(exts.iteritems()):
+            if '(DEPRECATED)' in desc and not showdeprecated:
+                continue
             rst.append('%s:%s: %s\n' % (' ' * indent, name, desc))
     return rst

 def extshelp():
     rst = loaddoc('extensions')().splitlines(True)
-    rst.extend(listexts(_('enabled extensions:'), extensions.enabled()))
+    rst.extend(listexts(
+        _('enabled extensions:'), extensions.enabled(), showdeprecated=True))
     rst.extend(listexts(_('disabled extensions:'), extensions.disabled()))
     doc = ''.join(rst)
     return doc
@@ -38,7 +41,7 @@ def optrst(options, verbose):
         shortopt, longopt, default, desc = option
         optlabel = _("VALUE") # default label

-        if _("DEPRECATED") in desc and not verbose:
+        if not verbose and ("DEPRECATED" in desc or _("DEPRECATED") in desc):
             continue

         so = ''
@@ -89,8 +92,6 @@ def topicmatch(kw):
             results['topics'].append((names[0], header))
     import commands # avoid cycle
     for cmd, entry in commands.table.iteritems():
-        if cmd.startswith('debug'):
-            continue
         if len(entry) == 3:
             summary = entry[2]
         else:
@@ -308,6 +309,8 @@ def help_(ui, name, unknowncmd=False, fu
     # list of commands
     if name == "shortlist":
         header = _('basic commands:\n\n')
+    elif name == "debug":
+        header = _('debug commands (internal and unsupported):\n\n')
     else:
         header = _('list of commands:\n\n')

@@ -323,7 +326,7 @@ def help_(ui, name, unknowncmd=False, fu
         if name == "shortlist" and not f.startswith("^"):
             continue
         f = f.lstrip("^")
-        if not ui.debugflag and f.startswith("debug"):
+        if not ui.debugflag and f.startswith("debug") and name != "debug":
             continue
         doc = e[0].__doc__
         if doc and 'DEPRECATED' in doc and not ui.verbose:
@@ -85,7 +85,9 @@ ones.
 be read. Mercurial checks each of these locations in the specified
 order until one or more configuration files are detected.

-.. note:: The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
+.. note::
+
+   The registry key ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mercurial``
    is used when running 32-bit Python on 64-bit Windows.

 Syntax
@@ -204,7 +206,9 @@ changesets. You can define subsequent al

    stable5 = latest -b stable

-.. note:: It is possible to create aliases with the same names as
+.. note::
+
+   It is possible to create aliases with the same names as
    existing commands, which will then override the original
    definitions. This is almost always a bad idea!

@@ -235,7 +239,9 @@ alias, as was done above for the purge a
 ``$HG_ARGS`` expands to the arguments given to Mercurial. In the ``hg
 echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.

-.. note:: Some global configuration options such as ``-R`` are
+.. note::
+
+   Some global configuration options such as ``-R`` are
    processed before shell aliases and will thus not be passed to
    aliases.

@@ -362,7 +368,9 @@ filtered by the command. The string ``OU
 of an empty temporary file, where the filtered data must be written by
 the command.

-.. note:: The tempfile mechanism is recommended for Windows systems,
+.. note::
+
+   The tempfile mechanism is recommended for Windows systems,
    where the standard shell I/O redirection operators often have
    strange effects and may corrupt the contents of your files.

@@ -708,13 +716,17 @@ variables it is passed are listed with n
 in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
 update failed (e.g. because conflicts not resolved), ``$HG_ERROR=1``.

-.. note:: It is generally better to use standard hooks rather than the
+.. note::
+
+   It is generally better to use standard hooks rather than the
    generic pre- and post- command hooks as they are guaranteed to be
    called in the appropriate contexts for influencing transactions.
    Also, hooks like "commit" will be called in all contexts that
    generate a commit (e.g. tag) and not just the commit command.

-.. note:: Environment variables with empty values may not be passed to
+.. note::
+
+   Environment variables with empty values may not be passed to
    hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
    will have an empty value under Unix-like platforms for non-merge
    changesets, while it will not be available at all under Windows.
@@ -69,6 +69,7 @@ and a regexp pattern of the form ``\.c$`
 regexp pattern, start it with ``^``.

 .. note::
+
   Patterns specified in other than ``.hgignore`` are always rooted.
   Please see :hg:`help patterns` for details.

@@ -73,6 +73,7 @@ 7. If the file to be merged is not binar
 8. The merge of the file fails and must be resolved before commit.

 .. note::
+
    After selecting a merge program, Mercurial will by default attempt
    to merge the files using a simple merge algorithm first. Only if it doesn't
    succeed because of conflicting changes Mercurial will actually execute the
@@ -7,6 +7,7 @@ patterns.
 Alternate pattern notations must be specified explicitly.

 .. note::
+
   Patterns specified in ``.hgignore`` are not rooted.
   Please see :hg:`help hgignore` for details.

@@ -42,6 +42,7 @@ Normally, all servers are ``publishing``
 - secret changesets are neither pushed, pulled, or cloned

 .. note::
+
   Pulling a draft changeset from a publishing server does not mark it
   as public on the server side due to the read-only nature of pull.

@@ -55,10 +56,12 @@ repository to disable publishing in its
 See :hg:`help config` for more information on configuration files.

 .. note::
+
   Servers running older versions of Mercurial are treated as
   publishing.

 .. note::
+
   Changesets in secret phase are not exchanged with the server. This
   applies to their content: file names, file contents, and changeset
   metadata. For technical reasons, the identifier (e.g. d825e4025e39)
@@ -39,6 +39,7 @@ 3. Nested repository states. They are de
    repositories states when committing in the parent repository.

 .. note::
+
    The ``.hgsubstate`` file should not be edited manually.


@@ -83,6 +84,9 @@ Interaction with Mercurial Commands
 :archive: archive does not recurse in subrepositories unless
     -S/--subrepos is specified.

+:cat: cat currently only handles exact file matches in subrepos.
+    Git and Subversion subrepositories are currently ignored.
+
 :commit: commit creates a consistent snapshot of the state of the
     entire project and its subrepositories. If any subrepositories
     have been modified, Mercurial will abort. Mercurial can be made
@@ -52,14 +52,20 @@ In addition to filters, there are some b

 - if(expr, then[, else])

+- ifcontains(expr, expr, then[, else])
+
 - ifeq(expr, expr, then[, else])

 - join(list, sep)

 - label(label, expr)

+- revset(query[, formatargs])
+
 - rstdoc(text, style)

+- shortest(node)
+
 - strip(text[, chars])

 - sub(pat, repl, expr)
@@ -106,3 +112,11 @@ Some sample command line templates:
 - Display the contents of the 'extra' field, one per line::

    $ hg log -r 0 --template "{join(extras, '\n')}\n"
+
+- Mark the current bookmark with '*'::
+
+   $ hg log --template "{bookmarks % '{bookmark}{ifeq(bookmark, current, \"*\")} '}\n"
+
+- Mark the working copy parent with '@'::
+
+   $ hg log --template "{ifcontains(rev, revset('.'), '@')}\n"
@@ -129,8 +129,25 @@ def peer(uiorrepo, opts, path, create=Fa
     return _peerorrepo(rui, path, create).peer()

 def defaultdest(source):
-    '''return default destination of clone if none is given'''
-    return os.path.basename(os.path.normpath(util.url(source).path or ''))
+    '''return default destination of clone if none is given
+
+    >>> defaultdest('foo')
+    'foo'
+    >>> defaultdest('/foo/bar')
+    'bar'
+    >>> defaultdest('/')
+    ''
+    >>> defaultdest('')
+    ''
+    >>> defaultdest('http://example.org/')
+    ''
+    >>> defaultdest('http://example.org/foo/')
+    'foo'
+    '''
+    path = util.url(source).path
+    if not path:
+        return ''
+    return os.path.basename(os.path.normpath(path))

 def share(ui, source, dest=None, update=True):
     '''create a shared repository'''
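The doctests above pin down the edge cases: a URL with no path component now yields an empty destination instead of an arbitrary basename. A stdlib-only approximation of the new behaviour, with `urlparse` standing in for Mercurial's `util.url` (an assumption; `util.url` also understands ssh-style paths):

    import os
    import urlparse  # Python 2; urllib.parse in Python 3

    def defaultdest(source):
        path = urlparse.urlsplit(source).path
        if not path:
            return ''
        return os.path.basename(os.path.normpath(path))

    print(defaultdest('http://example.org/foo/'))  # foo
    print(defaultdest('http://example.org'))       # '' (no path at all)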
@@ -284,7 +301,8 @@ def clone(ui, peeropts, source, dest=Non

     if dest is None:
         dest = defaultdest(source)
-        ui.status(_("destination directory: %s\n") % dest)
+        if dest:
+            ui.status(_("destination directory: %s\n") % dest)
     else:
         dest = ui.expandpath(dest)

@@ -413,7 +431,7 @@ def clone(ui, peeropts, source, dest=Non
             fp.write("default = %s\n" % defaulturl)
             fp.close()

-            destrepo.ui.setconfig('paths', 'default', defaulturl)
+            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

         if update:
             if update is not True:
@@ -567,8 +585,7 @@ def _outgoing(ui, repo, dest, opts):
     o = outgoing.missing
     if not o:
         scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
-        return None
-    return o
+    return o, other

 def outgoing(ui, repo, dest, opts):
     def recurse():
@@ -581,8 +598,9 @@ def outgoing(ui, repo, dest, opts):
         return ret

     limit = cmdutil.loglimit(opts)
-    o = _outgoing(ui, repo, dest, opts)
-    if o is None:
+    o, other = _outgoing(ui, repo, dest, opts)
+    if not o:
+        cmdutil.outgoinghooks(ui, repo, other, opts, o)
         return recurse()

     if opts.get('newest_first'):
@@ -598,6 +616,7 @@ def outgoing(ui, repo, dest, opts):
         count += 1
         displayer.show(repo[n])
     displayer.close()
+    cmdutil.outgoinghooks(ui, repo, other, opts, o)
     recurse()
     return 0 # exit code is zero since we found outgoing changes

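`cmdutil.outgoinghooks` above (and `prepushoutgoinghooks` later in this series) follow Mercurial's small `util.hooks` pattern: an ordered collection of callbacks that are all invoked with the same arguments, on both the empty and non-empty outgoing paths. A minimal sketch of that pattern (simplified; the 'largefiles' registration is purely illustrative):

    class hooks(object):
        '''ordered collection of callbacks, all run on each call'''
        def __init__(self):
            self._hooks = []
        def add(self, source, hook):
            self._hooks.append((source, hook))
        def __call__(self, *args):
            return [hook(*args) for source, hook in self._hooks]

    outgoinghooks = hooks()
    outgoinghooks.add('largefiles',
                      lambda ui, repo, other, opts, missing:
                          ui.note('%d outgoing changesets\n' % len(missing)))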
@@ -621,19 +640,19 @@ def remoteui(src, opts):
     for o in 'ssh', 'remotecmd':
         v = opts.get(o) or src.config('ui', o)
         if v:
-            dst.setconfig("ui", o, v)
+            dst.setconfig("ui", o, v, 'copied')

     # copy bundle-specific options
     r = src.config('bundle', 'mainreporoot')
     if r:
-        dst.setconfig('bundle', 'mainreporoot', r)
+        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

     # copy selected local settings to the remote ui
     for sect in ('auth', 'hostfingerprints', 'http_proxy'):
         for key, val in src.configitems(sect):
-            dst.setconfig(sect, key, val)
+            dst.setconfig(sect, key, val, 'copied')
     v = src.config('web', 'cacerts')
     if v:
-        dst.setconfig('web', 'cacerts', util.expandpath(v))
+        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

     return dst
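The new trailing argument in all these `setconfig` calls ('copied', 'clone', 'hgweb', ...) is a provenance label: `ui.setconfig` grew a source parameter so debugging output can report where a value came from. A toy illustration of the idea (not Mercurial's actual ui class):

    class fakeui(object):
        def __init__(self):
            self._cfg = {}
        def setconfig(self, section, name, value, source=''):
            self._cfg[(section, name)] = (value, source)
        def configsource(self, section, name):
            return self._cfg.get((section, name), (None, ''))[1]

    u = fakeui()
    u.setconfig('paths', 'default', 'http://example.org/repo', 'clone')
    print(u.configsource('paths', 'default'))  # clone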
@@ -64,10 +64,10 @@ class hgweb(object):
             r = repo

         r = self._getview(r)
-        r.ui.setconfig('ui', 'report_untrusted', 'off')
-        r.baseui.setconfig('ui', 'report_untrusted', 'off')
-        r.ui.setconfig('ui', 'nontty', 'true')
-        r.baseui.setconfig('ui', 'nontty', 'true')
+        r.ui.setconfig('ui', 'report_untrusted', 'off', 'hgweb')
+        r.baseui.setconfig('ui', 'report_untrusted', 'off', 'hgweb')
+        r.ui.setconfig('ui', 'nontty', 'true', 'hgweb')
+        r.baseui.setconfig('ui', 'nontty', 'true', 'hgweb')
         self.repo = r
         hook.redirect(True)
         self.mtime = -1
@@ -96,8 +96,8 @@ class hgwebdir(object):
             u = self.baseui.copy()
         else:
             u = ui.ui()
-            u.setconfig('ui', 'report_untrusted', 'off')
-            u.setconfig('ui', 'nontty', 'true')
+            u.setconfig('ui', 'report_untrusted', 'off', 'hgwebdir')
+            u.setconfig('ui', 'nontty', 'true', 'hgwebdir')

         if not isinstance(self.conf, (dict, list, tuple)):
             map = {'paths': 'hgweb-paths'}
@@ -308,17 +308,17 @@ class hgwebdir(object):

                     # add '/' to the name to make it obvious that
                     # the entry is a directory, not a regular repository
-                    row = dict(contact="",
-                               contact_sort="",
-                               name=name + '/',
-                               name_sort=name,
-                               url=url,
-                               description="",
-                               description_sort="",
-                               lastchange=d,
-                               lastchange_sort=d[1]-d[0],
-                               archives=[],
-                               isdirectory=True)
+                    row = {'contact': "",
+                           'contact_sort': "",
+                           'name': name + '/',
+                           'name_sort': name,
+                           'url': url,
+                           'description': "",
+                           'description_sort': "",
+                           'lastchange': d,
+                           'lastchange_sort': d[1]-d[0],
+                           'archives': [],
+                           'isdirectory': True}

                     seendirs.add(name)
                     yield row
@@ -356,17 +356,18 @@ class hgwebdir(object):
             contact = get_contact(get)
             description = get("web", "description", "")
             name = get("web", "name", name)
-            row = dict(contact=contact or "unknown",
-                       contact_sort=contact.upper() or "unknown",
-                       name=name,
-                       name_sort=name,
-                       url=url,
-                       description=description or "unknown",
-                       description_sort=description.upper() or "unknown",
-                       lastchange=d,
-                       lastchange_sort=d[1]-d[0],
-                       archives=archivelist(u, "tip", url),
-                       isdirectory=None)
+            row = {'contact': contact or "unknown",
+                   'contact_sort': contact.upper() or "unknown",
+                   'name': name,
+                   'name_sort': name,
+                   'url': url,
+                   'description': description or "unknown",
+                   'description_sort': description.upper() or "unknown",
+                   'lastchange': d,
+                   'lastchange_sort': d[1]-d[0],
+                   'archives': archivelist(u, "tip", url),
+                   'isdirectory': None,
+                   }

             seenrepos.add(name)
             yield row
@@ -12,7 +12,7 @@ from common import HTTP_OK
 HGTYPE = 'application/mercurial-0.1'
 HGERRTYPE = 'application/hg-error'

-class webproto(object):
+class webproto(wireproto.abstractserverproto):
     def __init__(self, req, ui):
         self.req = req
         self.response = ''
@@ -8,7 +8,7 @@
 import os, mimetypes, re, cgi, copy
 import webutil
 from mercurial import error, encoding, archival, templater, templatefilters
-from mercurial.node import short, hex, nullid
+from mercurial.node import short, hex
 from mercurial import util
 from common import paritygen, staticfile, get_contact, ErrorResponse
 from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
@@ -187,7 +187,7 @@ def _search(web, req, tmpl):

         mfunc = revset.match(web.repo.ui, revdef)
         try:
-            revs = mfunc(web.repo, list(web.repo))
+            revs = mfunc(web.repo, revset.baseset(web.repo))
             return MODE_REVSET, revs
             # ParseError: wrongly placed tokens, wrongs arguments, etc
             # RepoLookupError: no such revision, e.g. in 'revision:'
@@ -712,28 +712,22 @@ def comparison(web, req, tmpl):
             return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
         return f.data().splitlines()

+    parent = ctx.p1()
+    leftrev = parent.rev()
+    leftnode = parent.node()
+    rightrev = ctx.rev()
+    rightnode = ctx.node()
     if path in ctx:
         fctx = ctx[path]
-        rightrev = fctx.filerev()
-        rightnode = fctx.filenode()
         rightlines = filelines(fctx)
-        parents = fctx.parents()
-        if not parents:
-            leftrev = -1
-            leftnode = nullid
+        if path not in parent:
             leftlines = ()
         else:
-            pfctx = parents[0]
-            leftrev = pfctx.filerev()
-            leftnode = pfctx.filenode()
+            pfctx = parent[path]
             leftlines = filelines(pfctx)
     else:
-        rightrev = -1
-        rightnode = nullid
         rightlines = ()
         fctx = ctx.parents()[0][path]
-        leftrev = fctx.filerev()
-        leftnode = fctx.filenode()
         leftlines = filelines(fctx)

     comparison = webutil.compare(tmpl, context, leftlines, rightlines)
@@ -982,7 +976,11 @@ def graph(web, req, tmpl):
         if len(revs) >= revcount:
             break

-    dag = graphmod.dagwalker(web.repo, revs)
+    # We have to feed a baseset to dagwalker as it is expecting smartset
+    # object. This does not have a big impact on hgweb performance itself
+    # since hgweb graphing code is not itself lazy yet.
+    dag = graphmod.dagwalker(web.repo, revset.baseset(revs))
+    # As we said one line above... not lazy.
     tree = list(graphmod.colored(dag, web.repo))

     def getcolumns(tree):
@@ -1018,26 +1016,26 @@ def graph(web, req, tmpl):
                     [cgi.escape(x) for x in ctx.tags()],
                     [cgi.escape(x) for x in ctx.bookmarks()]))
         else:
-            edgedata = [dict(col=edge[0], nextcol=edge[1],
-                             color=(edge[2] - 1) % 6 + 1,
-                             width=edge[3], bcolor=edge[4])
+            edgedata = [{'col': edge[0], 'nextcol': edge[1],
+                         'color': (edge[2] - 1) % 6 + 1,
+                         'width': edge[3], 'bcolor': edge[4]}
                         for edge in edges]

             data.append(
-                dict(node=node,
-                     col=vtx[0],
-                     color=(vtx[1] - 1) % 6 + 1,
-                     edges=edgedata,
-                     row=row,
-                     nextrow=row + 1,
-                     desc=desc,
-                     user=user,
-                     age=age,
-                     bookmarks=webutil.nodebookmarksdict(
-                         web.repo, ctx.node()),
-                     branches=webutil.nodebranchdict(web.repo, ctx),
-                     inbranch=webutil.nodeinbranch(web.repo, ctx),
-                     tags=webutil.nodetagsdict(web.repo, ctx.node())))
+                {'node': node,
+                 'col': vtx[0],
+                 'color': (vtx[1] - 1) % 6 + 1,
+                 'edges': edgedata,
+                 'row': row,
+                 'nextrow': row + 1,
+                 'desc': desc,
+                 'user': user,
+                 'age': age,
+                 'bookmarks': webutil.nodebookmarksdict(
+                     web.repo, ctx.node()),
+                 'branches': webutil.nodebranchdict(web.repo, ctx),
+                 'inbranch': webutil.nodeinbranch(web.repo, ctx),
+                 'tags': webutil.nodetagsdict(web.repo, ctx.node())})

             row += 1
@@ -7,7 +7,7 @@
 # GNU General Public License version 2 or any later version.

 import os, copy
-from mercurial import match, patch, error, ui, util, pathutil
+from mercurial import match, patch, error, ui, util, pathutil, context
 from mercurial.i18n import _
 from mercurial.node import hex, nullid
 from common import ErrorResponse
@@ -138,6 +138,9 @@ def _siblings(siblings=[], hiderev=None)
         yield d

 def parents(ctx, hide=None):
+    if (isinstance(ctx, context.basefilectx) and
+        ctx.changectx().rev() != ctx.linkrev()):
+        return _siblings([ctx._repo[ctx.linkrev()]], hide)
     return _siblings(ctx.parents(), hide)

 def children(ctx, hide=None):
@@ -146,7 +149,7 @@ def children(ctx, hide=None):
 def renamelink(fctx):
     r = fctx.renamed()
     if r:
-        return [dict(file=r[0], node=hex(r[1]))]
+        return [{'file': r[0], 'node': hex(r[1])}]
     return []

 def nodetagsdict(repo, node):
@@ -6,7 +6,7 @@
 # GNU General Public License version 2 or any later version.

 from i18n import _
-import os, sys, time, types
+import os, sys, time
 import extensions, util, demandimport

 def _pythonhook(ui, repo, name, hname, funcname, args, throw):
@@ -19,11 +19,10 @@ def _pythonhook(ui, repo, name, hname, f
     unmodified commands (e.g. mercurial.commands.update) can
     be run as hooks without wrappers to convert return values.'''

-    ui.note(_("calling hook %s: %s\n") % (hname, funcname))
-    starttime = time.time()
-
-    obj = funcname
-    if not util.safehasattr(obj, '__call__'):
+    if util.safehasattr(funcname, '__call__'):
+        obj = funcname
+        funcname = obj.__module__ + "." + obj.__name__
+    else:
         d = funcname.rfind('.')
         if d == -1:
             raise util.Abort(_('%s hook is invalid ("%s" not in '
@@ -75,6 +74,10 @@ def _pythonhook(ui, repo, name, hname, f
                 raise util.Abort(_('%s hook is invalid '
                                    '("%s" is not callable)') %
                                  (hname, funcname))
+
+    ui.note(_("calling hook %s: %s\n") % (hname, funcname))
+    starttime = time.time()
+
     try:
         try:
             # redirect IO descriptors to the ui descriptors so hooks
@@ -100,11 +103,8 @@ def _pythonhook(ui, repo, name, hname, f
     finally:
         sys.stdout, sys.stderr, sys.stdin = old
         duration = time.time() - starttime
-    readablefunc = funcname
-    if isinstance(funcname, types.FunctionType):
-        readablefunc = funcname.__module__ + "." + funcname.__name__
     ui.log('pythonhook', 'pythonhook-%s: %s finished in %0.2f seconds\n',
-           name, readablefunc, duration)
+           name, funcname, duration)
     if r:
         if throw:
             raise util.Abort(_('%s hook failed') % hname)
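The refactor above normalizes `funcname` up front: a callable is converted to its dotted name, and a string is resolved by splitting on the last dot and importing the module. A stand-alone sketch of that resolution step (stdlib only; `resolvehook` is a hypothetical name, not Mercurial's):

    import importlib

    def resolvehook(funcname):
        # callables pass through; "module.callable" strings get imported
        if callable(funcname):
            return funcname
        modname, sep, attr = funcname.rpartition('.')
        if not sep:
            raise ValueError('%r is not in module.callable format' % funcname)
        return getattr(importlib.import_module(modname), attr)

    print(resolvehook('os.path.basename')('/tmp/x'))  # x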
@@ -8,6 +8,7 @@

 from node import nullid
 from i18n import _
+import tempfile
 import changegroup, statichttprepo, error, httpconnection, url, util, wireproto
 import os, urllib, urllib2, zlib, httplib
 import errno, socket
@@ -211,10 +212,29 @@ class httppeer(wireproto.wirepeer):
             fp.close()
             os.unlink(tempname)

-    def _abort(self, exception):
-        raise exception
+    def _calltwowaystream(self, cmd, fp, **args):
+        fh = None
+        filename = None
+        try:
+            # dump bundle to disk
+            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
+            fh = os.fdopen(fd, "wb")
+            d = fp.read(4096)
+            while d:
+                fh.write(d)
+                d = fp.read(4096)
+            fh.close()
+            # start http push
+            fp = httpconnection.httpsendfile(self.ui, filename, "rb")
+            headers = {'Content-Type': 'application/mercurial-0.1'}
+            return self._callstream(cmd, data=fp, headers=headers, **args)
+        finally:
+            if fh is not None:
+                fh.close()
+                os.unlink(filename)

-    def _decompress(self, stream):
+    def _callcompressable(self, cmd, **args):
+        stream = self._callstream(cmd, **args)
         return util.chunkbuffer(zgenerator(stream))

 class httpspeer(httppeer):
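`_calltwowaystream` spools the bundle to disk first so the upload has a known length before the HTTP request starts. The same pattern in isolation, as a generic helper (a sketch, not the peer method itself):

    import os
    import tempfile

    def spool(fp, chunksize=4096):
        # drain a file-like object into a named temp file; the caller
        # reopens it for the upload and unlinks it afterwards
        fd, filename = tempfile.mkstemp(prefix='hg-bundle-', suffix='.hg')
        fh = os.fdopen(fd, 'wb')
        try:
            d = fp.read(chunksize)
            while d:
                fh.write(d)
                d = fp.read(chunksize)
        finally:
            fh.close()
        return filename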
@@ -6,10 +6,11 @@
 # GNU General Public License version 2 or any later version.
 from node import hex, nullid, short
 from i18n import _
-import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
+import urllib
+import peer, changegroup, subrepo, pushkey, obsolete, repoview
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock as lockmod
-import transaction, store, encoding
+import transaction, store, encoding, exchange, bundle2
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
@@ -62,13 +63,14 @@ def unfilteredmethod(orig):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper

-MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
-LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
+moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
+                  'unbundle'))
+legacycaps = moderncaps.union(set(['changegroupsubset']))

 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''

-    def __init__(self, repo, caps=MODERNCAPS):
+    def __init__(self, repo, caps=moderncaps):
         peer.peerrepository.__init__(self)
         self._repo = repo.filtered('served')
         self.ui = repo.ui
@@ -103,18 +105,42 @@ class localpeer(peer.peerrepository):
     def known(self, nodes):
         return self._repo.known(nodes)

-    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
-        return self._repo.getbundle(source, heads=heads, common=common,
-                                    bundlecaps=None)
+    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
+                  format='HG10', **kwargs):
+        cg = exchange.getbundle(self._repo, source, heads=heads,
+                                common=common, bundlecaps=bundlecaps, **kwargs)
+        if bundlecaps is not None and 'HG2X' in bundlecaps:
+            # When requesting a bundle2, getbundle returns a stream to make the
+            # wire level function happier. We need to build a proper object
+            # from it in local peer.
+            cg = bundle2.unbundle20(self.ui, cg)
+        return cg

     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.

+    def unbundle(self, cg, heads, url):
+        """apply a bundle on a repo
+
+        This function handles the repo locking itself."""
+        try:
+            cg = exchange.readbundle(self.ui, cg, None)
+            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
+            if util.safehasattr(ret, 'getchunks'):
+                # This is a bundle20 object, turn it into an unbundler.
+                # This little dance should be dropped eventually when the API
+                # is finally improved.
+                stream = util.chunkbuffer(ret.getchunks())
+                ret = bundle2.unbundle20(self.ui, stream)
+            return ret
+        except exchange.PushRaced, exc:
+            raise error.ResponseError(_('push failed:'), exc.message)
+
     def lock(self):
         return self._repo.lock()

     def addchangegroup(self, cg, source, url):
-        return self._repo.addchangegroup(cg, source, url)
+        return changegroup.addchangegroup(self._repo, cg, source, url)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
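The "little dance" in `unbundle` rewraps an iterable of byte chunks as a file-like stream so `bundle2.unbundle20` can parse it again. A simplified stand-in for `util.chunkbuffer` to show the shape of that step (the real one is lazy; eager joining is an assumption of this sketch):

    import io

    def chunkbuffer(chunks):
        # the real util.chunkbuffer streams lazily; joining is enough here
        return io.BytesIO(b''.join(chunks))

    stream = chunkbuffer(iter([b'HG2X', b'\x00\x00\x00\x00']))
    print(stream.read(4))  # 'HG2X'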
@@ -131,7 +157,7 @@ class locallegacypeer(localpeer):
     restricted capabilities'''

     def __init__(self, repo):
-        localpeer.__init__(self, repo, caps=LEGACYCAPS)
+        localpeer.__init__(self, repo, caps=legacycaps)

     def branches(self, nodes):
         return self._repo.branches(nodes)
@@ -140,10 +166,10 @@ class locallegacypeer(localpeer):
         return self._repo.between(pairs)

     def changegroup(self, basenodes, source):
-        return self._repo.changegroup(basenodes, source)
+        return changegroup.changegroup(self._repo, basenodes, source)

     def changegroupsubset(self, bases, heads, source):
-        return self._repo.changegroupsubset(bases, heads, source)
+        return changegroup.changegroupsubset(self._repo, bases, heads, source)

 class localrepository(object):

149 |
|
175 | |||
@@ -154,6 +180,8 b' class localrepository(object):' | |||||
154 | requirements = ['revlogv1'] |
|
180 | requirements = ['revlogv1'] | |
155 | filtername = None |
|
181 | filtername = None | |
156 |
|
182 | |||
|
183 | bundle2caps = {'HG2X': ()} | |||
|
184 | ||||
157 | # a list of (ui, featureset) functions. |
|
185 | # a list of (ui, featureset) functions. | |
158 | # only functions defined in module of enabled extensions are invoked |
|
186 | # only functions defined in module of enabled extensions are invoked | |
159 | featuresetupfuncs = set() |
|
187 | featuresetupfuncs = set() | |
@@ -275,6 +303,12 @@ class localrepository(object):
             pass

     def _restrictcapabilities(self, caps):
+        # bundle2 is not ready for prime time, drop it unless explicitly
+        # required by the tests (or some brave tester)
+        if self.ui.configbool('experimental', 'bundle2-exp', False):
+            caps = set(caps)
+            capsblob = bundle2.encodecaps(self.bundle2caps)
+            caps.add('bundle2-exp=' + urllib.quote(capsblob))
         return caps

     def _applyrequirements(self, requirements):
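For a feel of what the advertised capability looks like on the wire: the caps dict is serialized and URL-quoted into a single `bundle2-exp=...` token. The exact `encodecaps` layout is an assumption in this sketch (one name-with-optional-values entry per capability, newline-joined):

    import urllib

    bundle2caps = {'HG2X': ()}
    # assumed layout: "name" or "name=v1,v2" per capability, newline-joined
    capsblob = '\n'.join(cap if not vals else '%s=%s' % (cap, ','.join(vals))
                         for cap, vals in sorted(bundle2caps.items()))
    print('bundle2-exp=' + urllib.quote(capsblob))  # bundle2-exp=HG2X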
@@ -428,7 +462,7 @@ class localrepository(object):
         '''Return a list of revisions matching the given revset'''
         expr = revset.formatspec(expr, *args)
         m = revset.match(None, expr)
-        return [r for r in m(self, list(self))]
+        return m(self, revset.spanset(self))

     def set(self, expr, *args):
         '''
@@ -823,13 +857,17 @@ class localrepository(object):
             raise error.RepoError(
                 _("abandoned transaction found - run hg recover"))

+        def onclose():
+            self.store.write(tr)
+
         self._writejournal(desc)
         renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
         rp = report and report or self.ui.warn
         tr = transaction.transaction(rp, self.sopener,
                                      "journal",
                                      aftertrans(renames),
-                                     self.store.createmode)
+                                     self.store.createmode,
+                                     onclose)
         self._transref = weakref.ref(tr)
         return tr

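The `onclose` argument follows a plain callback pattern: the transaction invokes it when closing, which is what lets the store flush its caches inside the transaction instead of at lock release (see the `unlock` hunk below, where `self.store.write()` disappears). A drastically simplified sketch; the real constructor also takes the report function, opener, journal name, and after-rename callback as shown above:

    def onclose():
        print('flushing store caches')

    class transaction(object):
        def __init__(self, onclose=None):
            self._onclose = onclose
        def close(self):
            if self._onclose is not None:
                self._onclose()  # runs before the journal is finalized
            # ... then rename journal files to undo files, etc.

    tr = transaction(onclose)
    tr.close()  # -> flushing store caches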
@@ -842,7 +880,7 @@ class localrepository(object):
                 (self.svfs, 'journal.phaseroots'))

     def undofiles(self):
-        return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
+        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

     def _writejournal(self, desc):
         self.opener.write("journal.dirstate",
@@ -992,6 +1030,14 @@ class localrepository(object):
         except AttributeError:
             pass
         self.invalidatecaches()
+        self.store.invalidatecaches()
+
+    def invalidateall(self):
+        '''Fully invalidates both store and non-store parts, causing the
+        subsequent operation to reread any outside changes.'''
+        # extension should hook this to invalidate its caches
+        self.invalidate()
+        self.invalidatedirstate()

     def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
         try:
@@ -1005,6 +1051,7 @@ class localrepository(object):
             l = lockmod.lock(vfs, lockname,
                              int(self.ui.config("ui", "timeout", "600")),
                              releasefn, desc=desc)
+            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
             if acquirefn:
                 acquirefn()
             return l
@@ -1029,7 +1076,6 @@ class localrepository(object):
             return l

         def unlock():
-            self.store.write()
             if hasunfilteredcache(self, '_phasecache'):
                 self._phasecache.write()
             for k, ce in self._filecache.items():
@@ -1122,12 +1168,14 @@ class localrepository(object):
                 self.ui.warn(_("warning: can't find ancestor for '%s' "
                                "copied from '%s'!\n") % (fname, cfname))

+        elif fparent1 == nullid:
+            fparent1, fparent2 = fparent2, nullid
         elif fparent2 != nullid:
             # is one parent an ancestor of the other?
-            fparentancestor = flog.ancestor(fparent1, fparent2)
-            if fparentancestor == fparent1:
+            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
+            if fparent1 in fparentancestors:
                 fparent1, fparent2 = fparent2, nullid
-            elif fparentancestor == fparent2:
+            elif fparent2 in fparentancestors:
                 fparent2 = nullid

         # is the file changed?
@@ -1183,10 +1231,9 @@ class localrepository(object):
         # only manage subrepos and .hgsubstate if .hgsub is present
         if '.hgsub' in wctx:
             # we'll decide whether to track this ourselves, thanks
-            if '.hgsubstate' in changes[0]:
-                changes[0].remove('.hgsubstate')
-            if '.hgsubstate' in changes[2]:
-                changes[2].remove('.hgsubstate')
+            for c in changes[:3]:
+                if '.hgsubstate' in c:
+                    c.remove('.hgsubstate')

             # compare current state to last committed state
             # build new substate based on last committed state
@@ -1578,7 +1625,7 @@ class localrepository(object):
         r = modified, added, removed, deleted, unknown, ignored, clean

         if listsubrepos:
-            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
+            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                 if working:
                     rev2 = None
                 else:
@@ -1658,623 +1705,24 @@ class localrepository(object):
         return r

     def pull(self, remote, heads=None, force=False):
-        if remote.local():
-            missing = set(remote.requirements) - self.supported
-            if missing:
-                msg = _("required features are not"
-                        " supported in the destination:"
-                        " %s") % (', '.join(sorted(missing)))
-                raise util.Abort(msg)
-
-        # don't open transaction for nothing or you break future useful
-        # rollback call
-        tr = None
-        trname = 'pull\n' + util.hidepassword(remote.url())
-        lock = self.lock()
-        try:
-            tmp = discovery.findcommonincoming(self.unfiltered(), remote,
-                                               heads=heads, force=force)
-            common, fetch, rheads = tmp
-            if not fetch:
-                self.ui.status(_("no changes found\n"))
-                result = 0
-            else:
-                tr = self.transaction(trname)
-                if heads is None and list(common) == [nullid]:
-                    self.ui.status(_("requesting all changes\n"))
-                elif heads is None and remote.capable('changegroupsubset'):
-                    # issue1320, avoid a race if remote changed after discovery
-                    heads = rheads
-
-                if remote.capable('getbundle'):
-                    # TODO: get bundlecaps from remote
-                    cg = remote.getbundle('pull', common=common,
-                                          heads=heads or rheads)
-                elif heads is None:
-                    cg = remote.changegroup(fetch, 'pull')
-                elif not remote.capable('changegroupsubset'):
-                    raise util.Abort(_("partial pull cannot be done because "
-                                       "other repository doesn't support "
-                                       "changegroupsubset."))
-                else:
-                    cg = remote.changegroupsubset(fetch, heads, 'pull')
-                result = self.addchangegroup(cg, 'pull', remote.url())
+        return exchange.pull (self, remote, heads, force)

-            # compute target subset
-            if heads is None:
-                # We pulled every thing possible
-                # sync on everything common
-                subset = common + rheads
-            else:
-                # We pulled a specific subset
-                # sync on this subset
-                subset = heads
-
-            # Get remote phases data from remote
-            remotephases = remote.listkeys('phases')
-            publishing = bool(remotephases.get('publishing', False))
-            if remotephases and not publishing:
-                # remote is new and unpublishing
-                pheads, _dr = phases.analyzeremotephases(self, subset,
-                                                         remotephases)
-                phases.advanceboundary(self, phases.public, pheads)
-                phases.advanceboundary(self, phases.draft, subset)
-            else:
-                # Remote is old or publishing all common changesets
-                # should be seen as public
-                phases.advanceboundary(self, phases.public, subset)
-
-            def gettransaction():
-                if tr is None:
-                    return self.transaction(trname)
-                return tr
-
-            obstr = obsolete.syncpull(self, remote, gettransaction)
-            if obstr is not None:
-                tr = obstr
-
-            if tr is not None:
-                tr.close()
-        finally:
-            if tr is not None:
-                tr.release()
-            lock.release()
-
-        return result
-
-    def checkpush(self, force, revs):
+    def checkpush(self, pushop):
         """Extensions can override this function if additional checks have
         to be performed before pushing, or call it if they override push
         command.
         """
         pass

-    def push(self, remote, force=False, revs=None, newbranch=False):
-        '''Push outgoing changesets (limited by revs) from the current
-        repository to remote. Return an integer:
-          - None means nothing to push
+    @unfilteredpropertycache
+    def prepushoutgoinghooks(self):
+        """Return util.hooks consists of "(repo, remote, outgoing)"
+        functions, which are called before pushing changesets.
-          - 0 means HTTP error
-          - 1 means we pushed and remote head count is unchanged *or*
-            we have outgoing changesets but refused to push
-          - other values as described by addchangegroup()
-        '''
-        if remote.local():
-            missing = set(self.requirements) - remote.local().supported
-            if missing:
-                msg = _("required features are not"
-                        " supported in the destination:"
-                        " %s") % (', '.join(sorted(missing)))
-                raise util.Abort(msg)
-
-        # there are two ways to push to remote repo:
-        #
-        # addchangegroup assumes local user can lock remote
-        # repo (local filesystem, old ssh servers).
-        #
-        # unbundle assumes local user cannot lock remote repo (new ssh
-        # servers, http servers).
-
-        if not remote.canpush():
-            raise util.Abort(_("destination does not support push"))
-        unfi = self.unfiltered()
-        def localphasemove(nodes, phase=phases.public):
-            """move <nodes> to <phase> in the local source repo"""
-            if locallock is not None:
-                phases.advanceboundary(self, phase, nodes)
-            else:
-                # repo is not locked, do not change any phases!
-                # Informs the user that phases should have been moved when
-                # applicable.
-                actualmoves = [n for n in nodes if phase < self[n].phase()]
-                phasestr = phases.phasenames[phase]
-                if actualmoves:
-                    self.ui.status(_('cannot lock source repo, skipping local'
-                                     ' %s phase update\n') % phasestr)
-        # get local lock as we might write phase data
-        locallock = None
-        try:
-            locallock = self.lock()
-        except IOError, err:
-            if err.errno != errno.EACCES:
-                raise
-            # source repo cannot be locked.
-            # We do not abort the push, but just disable the local phase
-            # synchronisation.
-            msg = 'cannot lock source repository: %s\n' % err
-            self.ui.debug(msg)
-        try:
-            self.checkpush(force, revs)
-            lock = None
-            unbundle = remote.capable('unbundle')
-            if not unbundle:
-                lock = remote.lock()
-            try:
-                # discovery
-                fci = discovery.findcommonincoming
-                commoninc = fci(unfi, remote, force=force)
-                common, inc, remoteheads = commoninc
-                fco = discovery.findcommonoutgoing
-                outgoing = fco(unfi, remote, onlyheads=revs,
-                               commoninc=commoninc, force=force)
-
-
-                if not outgoing.missing:
-                    # nothing to push
-                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
-                    ret = None
-                else:
-                    # something to push
-                    if not force:
-                        # if self.obsstore == False --> no obsolete
-                        # then, save the iteration
-                        if unfi.obsstore:
-                            # this message are here for 80 char limit reason
-                            mso = _("push includes obsolete changeset: %s!")
-                            mst = "push includes %s changeset: %s!"
-                            # plain versions for i18n tool to detect them
-                            _("push includes unstable changeset: %s!")
-                            _("push includes bumped changeset: %s!")
-                            _("push includes divergent changeset: %s!")
-                            # If we are to push if there is at least one
-                            # obsolete or unstable changeset in missing, at
-                            # least one of the missinghead will be obsolete or
-                            # unstable. So checking heads only is ok
-                            for node in outgoing.missingheads:
-                                ctx = unfi[node]
-                                if ctx.obsolete():
-                                    raise util.Abort(mso % ctx)
-                                elif ctx.troubled():
-                                    raise util.Abort(_(mst)
-                                                     % (ctx.troubles()[0],
-                                                        ctx))
-                    newbm = self.ui.configlist('bookmarks', 'pushing')
-                    discovery.checkheads(unfi, remote, outgoing,
-                                         remoteheads, newbranch,
-                                         bool(inc), newbm)
-
-                    # TODO: get bundlecaps from remote
-                    bundlecaps = None
-                    # create a changegroup from local
-                    if revs is None and not (outgoing.excluded
-                                             or self.changelog.filteredrevs):
-                        # push everything,
-                        # use the fast path, no race possible on push
-                        bundler = changegroup.bundle10(self, bundlecaps)
-                        cg = self._changegroupsubset(outgoing,
-                                                     bundler,
-                                                     'push',
-                                                     fastpath=True)
-                    else:
-                        cg = self.getlocalbundle('push', outgoing, bundlecaps)
-
-                    # apply changegroup to remote
-                    if unbundle:
-                        # local repo finds heads on server, finds out what
-                        # revs it must push. once revs transferred, if server
-                        # finds it has different heads (someone else won
-                        # commit/push race), server aborts.
-                        if force:
-                            remoteheads = ['force']
-                        # ssh: return remote's addchangegroup()
-                        # http: return remote's addchangegroup() or 0 for error
-                        ret = remote.unbundle(cg, remoteheads, 'push')
-                    else:
-                        # we return an integer indicating remote head count
-                        # change
-                        ret = remote.addchangegroup(cg, 'push', self.url())
-
-                if ret:
-                    # push succeed, synchronize target of the push
1887 | # push succeed, synchronize target of the push |
|
|||
1888 | cheads = outgoing.missingheads |
|
|||
1889 | elif revs is None: |
|
|||
1890 | # All out push fails. synchronize all common |
|
|||
1891 | cheads = outgoing.commonheads |
|
|||
1892 | else: |
|
|||
1893 | # I want cheads = heads(::missingheads and ::commonheads) |
|
|||
1894 | # (missingheads is revs with secret changeset filtered out) |
|
|||
1895 | # |
|
|||
1896 | # This can be expressed as: |
|
|||
1897 | # cheads = ( (missingheads and ::commonheads) |
|
|||
1898 | # + (commonheads and ::missingheads))" |
|
|||
1899 | # ) |
|
|||
1900 | # |
|
|||
1901 | # while trying to push we already computed the following: |
|
|||
1902 | # common = (::commonheads) |
|
|||
1903 | # missing = ((commonheads::missingheads) - commonheads) |
|
|||
1904 | # |
|
|||
1905 | # We can pick: |
|
|||
1906 | # * missingheads part of common (::commonheads) |
|
|||
1907 | common = set(outgoing.common) |
|
|||
1908 | nm = self.changelog.nodemap |
|
|||
1909 | cheads = [node for node in revs if nm[node] in common] |
|
|||
1910 | # and |
|
|||
1911 | # * commonheads parents on missing |
|
|||
1912 | revset = unfi.set('%ln and parents(roots(%ln))', |
|
|||
1913 | outgoing.commonheads, |
|
|||
1914 | outgoing.missing) |
|
|||
1915 | cheads.extend(c.node() for c in revset) |
|
|||
1916 | # even when we don't push, exchanging phase data is useful |
|
|||
1917 | remotephases = remote.listkeys('phases') |
|
|||
1918 | if (self.ui.configbool('ui', '_usedassubrepo', False) |
|
|||
1919 | and remotephases # server supports phases |
|
|||
1920 | and ret is None # nothing was pushed |
|
|||
1921 | and remotephases.get('publishing', False)): |
|
|||
1922 | # When: |
|
|||
1923 | # - this is a subrepo push |
|
|||
1924 | # - and remote support phase |
|
|||
1925 | # - and no changeset was pushed |
|
|||
1926 | # - and remote is publishing |
|
|||
1927 | # We may be in issue 3871 case! |
|
|||
1928 | # We drop the possible phase synchronisation done by |
|
|||
1929 | # courtesy to publish changesets possibly locally draft |
|
|||
1930 | # on the remote. |
|
|||
1931 | remotephases = {'publishing': 'True'} |
|
|||
1932 | if not remotephases: # old server or public only repo |
|
|||
1933 | localphasemove(cheads) |
|
|||
1934 | # don't push any phase data as there is nothing to push |
|
|||
1935 | else: |
|
|||
1936 | ana = phases.analyzeremotephases(self, cheads, remotephases) |
|
|||
1937 | pheads, droots = ana |
|
|||
1938 | ### Apply remote phase on local |
|
|||
1939 | if remotephases.get('publishing', False): |
|
|||
1940 | localphasemove(cheads) |
|
|||
1941 | else: # publish = False |
|
|||
1942 | localphasemove(pheads) |
|
|||
1943 | localphasemove(cheads, phases.draft) |
|
|||
1944 | ### Apply local phase on remote |
|
|||
1945 |
|
||||
1946 | # Get the list of all revs draft on remote by public here. |
|
|||
1947 | # XXX Beware that revset break if droots is not strictly |
|
|||
1948 | # XXX root we may want to ensure it is but it is costly |
|
|||
1949 | outdated = unfi.set('heads((%ln::%ln) and public())', |
|
|||
1950 | droots, cheads) |
|
|||
1951 | for newremotehead in outdated: |
|
|||
1952 | r = remote.pushkey('phases', |
|
|||
1953 | newremotehead.hex(), |
|
|||
1954 | str(phases.draft), |
|
|||
1955 | str(phases.public)) |
|
|||
1956 | if not r: |
|
|||
1957 | self.ui.warn(_('updating %s to public failed!\n') |
|
|||
1958 | % newremotehead) |
|
|||
1959 | self.ui.debug('try to push obsolete markers to remote\n') |
|
|||
1960 | obsolete.syncpush(self, remote) |
|
|||
1961 | finally: |
|
|||
1962 | if lock is not None: |
|
|||
1963 | lock.release() |
|
|||
1964 | finally: |
|
|||
1965 | if locallock is not None: |
|
|||
1966 | locallock.release() |
|
|||
1967 |
|
||||
1968 | bookmarks.updateremote(self.ui, unfi, remote, revs) |
|
|||
1969 | return ret |
|
|||
1970 |
|
||||
1971 | def changegroupinfo(self, nodes, source): |
|
|||
1972 | if self.ui.verbose or source == 'bundle': |
|
|||
1973 | self.ui.status(_("%d changesets found\n") % len(nodes)) |
|
|||
1974 | if self.ui.debugflag: |
|
|||
1975 | self.ui.debug("list of changesets:\n") |
|
|||
1976 | for node in nodes: |
|
|||
1977 | self.ui.debug("%s\n" % hex(node)) |
|
|||
1978 |
|
||||
1979 | def changegroupsubset(self, bases, heads, source): |
|
|||
1980 | """Compute a changegroup consisting of all the nodes that are |
|
|||
1981 | descendants of any of the bases and ancestors of any of the heads. |
|
|||
1982 | Return a chunkbuffer object whose read() method will return |
|
|||
1983 | successive changegroup chunks. |
|
|||
1984 |
|
||||
1985 | It is fairly complex as determining which filenodes and which |
|
|||
1986 | manifest nodes need to be included for the changeset to be complete |
|
|||
1987 | is non-trivial. |
|
|||
1988 |
|
||||
1989 | Another wrinkle is doing the reverse, figuring out which changeset in |
|
|||
1990 | the changegroup a particular filenode or manifestnode belongs to. |
|
|||
1991 | """ |
|
1721 | """ | |
1992 | cl = self.changelog |
|
1722 | return util.hooks() | |
1993 | if not bases: |
|
|||
1994 | bases = [nullid] |
|
|||
1995 | # TODO: remove call to nodesbetween. |
|
|||
1996 | csets, bases, heads = cl.nodesbetween(bases, heads) |
|
|||
1997 | discbases = [] |
|
|||
1998 | for n in bases: |
|
|||
1999 | discbases.extend([p for p in cl.parents(n) if p != nullid]) |
|
|||
2000 | outgoing = discovery.outgoing(cl, discbases, heads) |
|
|||
2001 | bundler = changegroup.bundle10(self) |
|
|||
2002 | return self._changegroupsubset(outgoing, bundler, source) |
|
|||
2003 |
|
||||
2004 | def getlocalbundle(self, source, outgoing, bundlecaps=None): |
|
|||
2005 | """Like getbundle, but taking a discovery.outgoing as an argument. |
|
|||
2006 |
|
||||
2007 | This is only implemented for local repos and reuses potentially |
|
|||
2008 | precomputed sets in outgoing.""" |
|
|||
2009 | if not outgoing.missing: |
|
|||
2010 | return None |
|
|||
2011 | bundler = changegroup.bundle10(self, bundlecaps) |
|
|||
2012 | return self._changegroupsubset(outgoing, bundler, source) |
|
|||
2013 |
|
1723 | |||
2014 | def getbundle(self, source, heads=None, common=None, bundlecaps=None): |
|
1724 | def push(self, remote, force=False, revs=None, newbranch=False): | |
2015 | """Like changegroupsubset, but returns the set difference between the |
|
1725 | return exchange.push(self, remote, force, revs, newbranch) | |
2016 | ancestors of heads and the ancestors common. |
|
|||
2017 |
|
||||
2018 | If heads is None, use the local heads. If common is None, use [nullid]. |
|
|||
2019 |
|
||||
2020 | The nodes in common might not all be known locally due to the way the |
|
|||
2021 | current discovery protocol works. |
|
|||
2022 | """ |
|
|||
2023 | cl = self.changelog |
|
|||
2024 | if common: |
|
|||
2025 | hasnode = cl.hasnode |
|
|||
2026 | common = [n for n in common if hasnode(n)] |
|
|||
2027 | else: |
|
|||
2028 | common = [nullid] |
|
|||
2029 | if not heads: |
|
|||
2030 | heads = cl.heads() |
|
|||
2031 | return self.getlocalbundle(source, |
|
|||
2032 | discovery.outgoing(cl, common, heads), |
|
|||
2033 | bundlecaps=bundlecaps) |
|
|||
2034 |
|
||||
2035 | @unfilteredmethod |
|
|||
2036 | def _changegroupsubset(self, outgoing, bundler, source, |
|
|||
2037 | fastpath=False): |
|
|||
2038 | commonrevs = outgoing.common |
|
|||
2039 | csets = outgoing.missing |
|
|||
2040 | heads = outgoing.missingheads |
|
|||
2041 | # We go through the fast path if we get told to, or if all (unfiltered |
|
|||
2042 | # heads have been requested (since we then know there all linkrevs will |
|
|||
2043 | # be pulled by the client). |
|
|||
2044 | heads.sort() |
|
|||
2045 | fastpathlinkrev = fastpath or ( |
|
|||
2046 | self.filtername is None and heads == sorted(self.heads())) |
|
|||
2047 |
|
||||
2048 | self.hook('preoutgoing', throw=True, source=source) |
|
|||
2049 | self.changegroupinfo(csets, source) |
|
|||
2050 | gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source) |
|
|||
2051 | return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN') |
|
|||
2052 |
|
||||
2053 | def changegroup(self, basenodes, source): |
|
|||
2054 | # to avoid a race we use changegroupsubset() (issue1320) |
|
|||
2055 | return self.changegroupsubset(basenodes, self.heads(), source) |
|
|||
2056 |
|
||||
2057 | @unfilteredmethod |
|
|||
2058 | def addchangegroup(self, source, srctype, url, emptyok=False): |
|
|||
2059 | """Add the changegroup returned by source.read() to this repo. |
|
|||
2060 | srctype is a string like 'push', 'pull', or 'unbundle'. url is |
|
|||
2061 | the URL of the repo where this changegroup is coming from. |
|
|||
2062 |
|
||||
2063 | Return an integer summarizing the change to this repo: |
|
|||
2064 | - nothing changed or no source: 0 |
|
|||
2065 | - more heads than before: 1+added heads (2..n) |
|
|||
2066 | - fewer heads than before: -1-removed heads (-2..-n) |
|
|||
2067 | - number of heads stays the same: 1 |
|
|||
2068 | """ |
|
|||
2069 | def csmap(x): |
|
|||
2070 | self.ui.debug("add changeset %s\n" % short(x)) |
|
|||
2071 | return len(cl) |
|
|||
2072 |
|
||||
2073 | def revmap(x): |
|
|||
2074 | return cl.rev(x) |
|
|||
2075 |
|
||||
2076 | if not source: |
|
|||
2077 | return 0 |
|
|||
2078 |
|
||||
2079 | self.hook('prechangegroup', throw=True, source=srctype, url=url) |
|
|||
2080 |
|
||||
2081 | changesets = files = revisions = 0 |
|
|||
2082 | efiles = set() |
|
|||
2083 |
|
||||
2084 | # write changelog data to temp files so concurrent readers will not see |
|
|||
2085 | # inconsistent view |
|
|||
2086 | cl = self.changelog |
|
|||
2087 | cl.delayupdate() |
|
|||
2088 | oldheads = cl.heads() |
|
|||
2089 |
|
||||
2090 | tr = self.transaction("\n".join([srctype, util.hidepassword(url)])) |
|
|||
2091 | try: |
|
|||
2092 | trp = weakref.proxy(tr) |
|
|||
2093 | # pull off the changeset group |
|
|||
2094 | self.ui.status(_("adding changesets\n")) |
|
|||
2095 | clstart = len(cl) |
|
|||
2096 | class prog(object): |
|
|||
2097 | step = _('changesets') |
|
|||
2098 | count = 1 |
|
|||
2099 | ui = self.ui |
|
|||
2100 | total = None |
|
|||
2101 | def __call__(self): |
|
|||
2102 | self.ui.progress(self.step, self.count, unit=_('chunks'), |
|
|||
2103 | total=self.total) |
|
|||
2104 | self.count += 1 |
|
|||
2105 | pr = prog() |
|
|||
2106 | source.callback = pr |
|
|||
2107 |
|
||||
2108 | source.changelogheader() |
|
|||
2109 | srccontent = cl.addgroup(source, csmap, trp) |
|
|||
2110 | if not (srccontent or emptyok): |
|
|||
2111 | raise util.Abort(_("received changelog group is empty")) |
|
|||
2112 | clend = len(cl) |
|
|||
2113 | changesets = clend - clstart |
|
|||
2114 | for c in xrange(clstart, clend): |
|
|||
2115 | efiles.update(self[c].files()) |
|
|||
2116 | efiles = len(efiles) |
|
|||
2117 | self.ui.progress(_('changesets'), None) |
|
|||
2118 |
|
||||
2119 | # pull off the manifest group |
|
|||
2120 | self.ui.status(_("adding manifests\n")) |
|
|||
2121 | pr.step = _('manifests') |
|
|||
2122 | pr.count = 1 |
|
|||
2123 | pr.total = changesets # manifests <= changesets |
|
|||
2124 | # no need to check for empty manifest group here: |
|
|||
2125 | # if the result of the merge of 1 and 2 is the same in 3 and 4, |
|
|||
2126 | # no new manifest will be created and the manifest group will |
|
|||
2127 | # be empty during the pull |
|
|||
2128 | source.manifestheader() |
|
|||
2129 | self.manifest.addgroup(source, revmap, trp) |
|
|||
2130 | self.ui.progress(_('manifests'), None) |
|
|||
2131 |
|
||||
2132 | needfiles = {} |
|
|||
2133 | if self.ui.configbool('server', 'validate', default=False): |
|
|||
2134 | # validate incoming csets have their manifests |
|
|||
2135 | for cset in xrange(clstart, clend): |
|
|||
2136 | mfest = self.changelog.read(self.changelog.node(cset))[0] |
|
|||
2137 | mfest = self.manifest.readdelta(mfest) |
|
|||
2138 | # store file nodes we must see |
|
|||
2139 | for f, n in mfest.iteritems(): |
|
|||
2140 | needfiles.setdefault(f, set()).add(n) |
|
|||
2141 |
|
||||
2142 | # process the files |
|
|||
2143 | self.ui.status(_("adding file changes\n")) |
|
|||
2144 | pr.step = _('files') |
|
|||
2145 | pr.count = 1 |
|
|||
2146 | pr.total = efiles |
|
|||
2147 | source.callback = None |
|
|||
2148 |
|
||||
2149 | newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp, |
|
|||
2150 | pr, needfiles) |
|
|||
2151 | revisions += newrevs |
|
|||
2152 | files += newfiles |
|
|||
2153 |
|
||||
2154 | dh = 0 |
|
|||
2155 | if oldheads: |
|
|||
2156 | heads = cl.heads() |
|
|||
2157 | dh = len(heads) - len(oldheads) |
|
|||
2158 | for h in heads: |
|
|||
2159 | if h not in oldheads and self[h].closesbranch(): |
|
|||
2160 | dh -= 1 |
|
|||
2161 | htext = "" |
|
|||
2162 | if dh: |
|
|||
2163 | htext = _(" (%+d heads)") % dh |
|
|||
2164 |
|
||||
2165 | self.ui.status(_("added %d changesets" |
|
|||
2166 | " with %d changes to %d files%s\n") |
|
|||
2167 | % (changesets, revisions, files, htext)) |
|
|||
2168 | self.invalidatevolatilesets() |
|
|||
2169 |
|
||||
2170 | if changesets > 0: |
|
|||
2171 | p = lambda: cl.writepending() and self.root or "" |
|
|||
2172 | self.hook('pretxnchangegroup', throw=True, |
|
|||
2173 | node=hex(cl.node(clstart)), source=srctype, |
|
|||
2174 | url=url, pending=p) |
|
|||
2175 |
|
||||
2176 | added = [cl.node(r) for r in xrange(clstart, clend)] |
|
|||
2177 | publishing = self.ui.configbool('phases', 'publish', True) |
|
|||
2178 | if srctype == 'push': |
|
|||
2179 | # Old server can not push the boundary themself. |
|
|||
2180 | # New server won't push the boundary if changeset already |
|
|||
2181 | # existed locally as secrete |
|
|||
2182 | # |
|
|||
2183 | # We should not use added here but the list of all change in |
|
|||
2184 | # the bundle |
|
|||
2185 | if publishing: |
|
|||
2186 | phases.advanceboundary(self, phases.public, srccontent) |
|
|||
2187 | else: |
|
|||
2188 | phases.advanceboundary(self, phases.draft, srccontent) |
|
|||
2189 | phases.retractboundary(self, phases.draft, added) |
|
|||
2190 | elif srctype != 'strip': |
|
|||
2191 | # publishing only alter behavior during push |
|
|||
2192 | # |
|
|||
2193 | # strip should not touch boundary at all |
|
|||
2194 | phases.retractboundary(self, phases.draft, added) |
|
|||
2195 |
|
||||
2196 | # make changelog see real files again |
|
|||
2197 | cl.finalize(trp) |
|
|||
2198 |
|
||||
2199 | tr.close() |
|
|||
2200 |
|
||||
2201 | if changesets > 0: |
|
|||
2202 | if srctype != 'strip': |
|
|||
2203 | # During strip, branchcache is invalid but coming call to |
|
|||
2204 | # `destroyed` will repair it. |
|
|||
2205 | # In other case we can safely update cache on disk. |
|
|||
2206 | branchmap.updatecache(self.filtered('served')) |
|
|||
2207 | def runhooks(): |
|
|||
2208 | # These hooks run when the lock releases, not when the |
|
|||
2209 | # transaction closes. So it's possible for the changelog |
|
|||
2210 | # to have changed since we last saw it. |
|
|||
2211 | if clstart >= len(self): |
|
|||
2212 | return |
|
|||
2213 |
|
||||
2214 | # forcefully update the on-disk branch cache |
|
|||
2215 | self.ui.debug("updating the branch cache\n") |
|
|||
2216 | self.hook("changegroup", node=hex(cl.node(clstart)), |
|
|||
2217 | source=srctype, url=url) |
|
|||
2218 |
|
||||
2219 | for n in added: |
|
|||
2220 | self.hook("incoming", node=hex(n), source=srctype, |
|
|||
2221 | url=url) |
|
|||
2222 |
|
||||
2223 | newheads = [h for h in self.heads() if h not in oldheads] |
|
|||
2224 | self.ui.log("incoming", |
|
|||
2225 | "%s incoming changes - new heads: %s\n", |
|
|||
2226 | len(added), |
|
|||
2227 | ', '.join([hex(c[:6]) for c in newheads])) |
|
|||
2228 | self._afterlock(runhooks) |
|
|||
2229 |
|
||||
2230 | finally: |
|
|||
2231 | tr.release() |
|
|||
2232 | # never return 0 here: |
|
|||
2233 | if dh < 0: |
|
|||
2234 | return dh - 1 |
|
|||
2235 | else: |
|
|||
2236 | return dh + 1 |
|
|||
2237 |
|
||||
2238 | def addchangegroupfiles(self, source, revmap, trp, pr, needfiles): |
|
|||
2239 | revisions = 0 |
|
|||
2240 | files = 0 |
|
|||
2241 | while True: |
|
|||
2242 | chunkdata = source.filelogheader() |
|
|||
2243 | if not chunkdata: |
|
|||
2244 | break |
|
|||
2245 | f = chunkdata["filename"] |
|
|||
2246 | self.ui.debug("adding %s revisions\n" % f) |
|
|||
2247 | pr() |
|
|||
2248 | fl = self.file(f) |
|
|||
2249 | o = len(fl) |
|
|||
2250 | if not fl.addgroup(source, revmap, trp): |
|
|||
2251 | raise util.Abort(_("received file revlog group is empty")) |
|
|||
2252 | revisions += len(fl) - o |
|
|||
2253 | files += 1 |
|
|||
2254 | if f in needfiles: |
|
|||
2255 | needs = needfiles[f] |
|
|||
2256 | for new in xrange(o, len(fl)): |
|
|||
2257 | n = fl.node(new) |
|
|||
2258 | if n in needs: |
|
|||
2259 | needs.remove(n) |
|
|||
2260 | else: |
|
|||
2261 | raise util.Abort( |
|
|||
2262 | _("received spurious file revlog entry")) |
|
|||
2263 | if not needs: |
|
|||
2264 | del needfiles[f] |
|
|||
2265 | self.ui.progress(_('files'), None) |
|
|||
2266 |
|
||||
2267 | for f, needs in needfiles.iteritems(): |
|
|||
2268 | fl = self.file(f) |
|
|||
2269 | for n in needs: |
|
|||
2270 | try: |
|
|||
2271 | fl.rev(n) |
|
|||
2272 | except error.LookupError: |
|
|||
2273 | raise util.Abort( |
|
|||
2274 | _('missing file data for %s:%s - run hg verify') % |
|
|||
2275 | (f, hex(n))) |
|
|||
2276 |
|
||||
2277 | return revisions, files |
|
|||
2278 |
|
1726 | |||
2279 | def stream_in(self, remote, requirements): |
|
1727 | def stream_in(self, remote, requirements): | |
2280 | lock = self.lock() |
|
1728 | lock = self.lock() | |
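Note: the push body removed above now lives in the new exchange module, and localrepo keeps only the thin delegation plus the prepushoutgoinghooks property. A minimal sketch of how an extension could subscribe to those hooks follows; the add(source, hook) call on util.hooks is an assumption (only the util.hooks() constructor appears in this diff), and 'myext'/toomanyheads are illustrative names:

    # hypothetical extension module
    from mercurial import util

    def toomanyheads(repo, remote, outgoing):
        # invoked as (repo, remote, outgoing) just before changesets are sent
        if len(outgoing.missingheads) > 10:
            raise util.Abort('refusing to push more than 10 new heads')

    def reposetup(ui, repo):
        repo.prepushoutgoinghooks.add('myext', toomanyheads)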
@@ -2310,26 +1758,36 b' class localrepository(object):'
         handled_bytes = 0
         self.ui.progress(_('clone'), 0, total=total_bytes)
         start = time.time()
-        for i in xrange(total_files):
-            # XXX doesn't support '\n' or '\r' in filenames
-            l = fp.readline()
-            try:
-                name, size = l.split('\0', 1)
-                size = int(size)
-            except (ValueError, TypeError):
-                raise error.ResponseError(
-                    _('unexpected response from remote server:'), l)
-            if self.ui.debugflag:
-                self.ui.debug('adding %s (%s)\n' %
-                              (name, util.bytecount(size)))
-            # for backwards compat, name was partially encoded
-            ofp = self.sopener(store.decodedir(name), 'w')
-            for chunk in util.filechunkiter(fp, limit=size):
-                handled_bytes += len(chunk)
-                self.ui.progress(_('clone'), handled_bytes,
-                                 total=total_bytes)
-                ofp.write(chunk)
-            ofp.close()
+
+        tr = self.transaction(_('clone'))
+        try:
+            for i in xrange(total_files):
+                # XXX doesn't support '\n' or '\r' in filenames
+                l = fp.readline()
+                try:
+                    name, size = l.split('\0', 1)
+                    size = int(size)
+                except (ValueError, TypeError):
+                    raise error.ResponseError(
+                        _('unexpected response from remote server:'), l)
+                if self.ui.debugflag:
+                    self.ui.debug('adding %s (%s)\n' %
+                                  (name, util.bytecount(size)))
+                # for backwards compat, name was partially encoded
+                ofp = self.sopener(store.decodedir(name), 'w')
+                for chunk in util.filechunkiter(fp, limit=size):
+                    handled_bytes += len(chunk)
+                    self.ui.progress(_('clone'), handled_bytes,
+                                     total=total_bytes)
+                    ofp.write(chunk)
+                ofp.close()
+            tr.close()
+        finally:
+            tr.release()
+
+        # Writing straight to files circumvented the inmemory caches
+        self.invalidate()
+
         elapsed = time.time() - start
         if elapsed <= 0:
             elapsed = 0.001
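Note: the added lines wrap the raw stream writes in a transaction so an interrupted stream clone can be rolled back, and invalidate() then drops the in-memory caches that the direct file writes bypassed. The close/release protocol is the standard Mercurial one, sketched standalone below (repo stands for any localrepository):

    tr = repo.transaction('clone')
    try:
        # ... write the incoming data here ...
        tr.close()    # commit; after this, release() is a no-op
    finally:
        tr.release()  # rolls the transaction back if close() was never reached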
@@ -38,7 +38,7 b' class lock(object):'
         self.desc = desc
         self.postrelease = []
         self.pid = os.getpid()
-        self.lock()
+        self.delay = self.lock()

     def __del__(self):
         if self.held:
@@ -57,7 +57,7 b' class lock(object):'
         while True:
             try:
                 self.trylock()
-                return
+                return self.timeout - timeout
             except error.LockHeld, inst:
                 if timeout != 0:
                     time.sleep(1)
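Note: lock() now returns how long acquisition took (self.timeout - timeout, i.e. the seconds spent in the retry loop), and __init__ stores that in self.delay so callers can tell whether the lock was contended. Caller-side use is an assumption, not shown in this diff; for example:

    l = repo.lock()   # a lock.lock instance, per the change above
    if l.delay:
        repo.ui.warn('got lock after %d seconds\n' % l.delay)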
@@ -6,30 +6,32 b''
 # GNU General Public License version 2 or any later version.

 import re
-import util, scmutil
+import util, pathutil
 from i18n import _

-def _rematcher(pat):
-    m = util.compilere(pat)
+def _rematcher(regex):
+    '''compile the regexp with the best available regexp engine and return a
+    matcher function'''
+    m = util.compilere(regex)
     try:
         # slightly faster, provided by facebook's re2 bindings
         return m.test_match
     except AttributeError:
         return m.match

-def _expandsets(pats, ctx):
-    '''convert set: patterns into a list of files in the given context'''
+def _expandsets(kindpats, ctx):
+    '''Returns the kindpats list with the 'set' patterns expanded.'''
     fset = set()
     other = []

-    for kind, expr in pats:
+    for kind, pat in kindpats:
         if kind == 'set':
             if not ctx:
                 raise util.Abort("fileset expression with no context")
-            s = ctx.getfileset(expr)
+            s = ctx.getfileset(pat)
             fset.update(s)
             continue
-        other.append((kind, expr))
+        other.append((kind, pat))
     return fset, other

 class match(object):
@@ -41,10 +43,10 b' class match(object):'
         root - the canonical root of the tree you're matching against
         cwd - the current working directory, if relevant
         patterns - patterns to find
-        include - patterns to include
-        exclude - patterns to exclude
-        default - if a pattern in names has no explicit type, assume this one
-        exact - patterns are actually filenames
+        include - patterns to include (unless they are excluded)
+        exclude - patterns to exclude (even if they are included)
+        default - if a pattern in patterns has no explicit type, assume this one
+        exact - patterns are actually filenames (include/exclude still apply)

         a pattern is one of:
         'glob:<glob>' - a glob relative to cwd
@@ -59,17 +61,17 b' class match(object):'

         self._root = root
         self._cwd = cwd
-        self._files = []
+        self._files = [] # exact files and roots of patterns
         self._anypats = bool(include or exclude)
         self._ctx = ctx
         self._always = False

         if include:
-            pats = _normalize(include, 'glob', root, cwd, auditor)
-            self.includepat, im = _buildmatch(ctx, pats, '(?:/|$)')
+            kindpats = _normalize(include, 'glob', root, cwd, auditor)
+            self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)')
         if exclude:
-            pats = _normalize(exclude, 'glob', root, cwd, auditor)
-            self.excludepat, em = _buildmatch(ctx, pats, '(?:/|$)')
+            kindpats = _normalize(exclude, 'glob', root, cwd, auditor)
+            self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)')
         if exact:
             if isinstance(patterns, list):
                 self._files = patterns
@@ -77,10 +79,10 b' class match(object):'
                 self._files = list(patterns)
             pm = self.exact
         elif patterns:
-            pats = _normalize(patterns, default, root, cwd, auditor)
-            self._files = _roots(pats)
-            self._anypats = self._anypats or _anypats(pats)
-            self.patternspat, pm = _buildmatch(ctx, pats, '$')
+            kindpats = _normalize(patterns, default, root, cwd, auditor)
+            self._files = _roots(kindpats)
+            self._anypats = self._anypats or _anypats(kindpats)
+            self.patternspat, pm = _buildmatch(ctx, kindpats, '$')

         if patterns or exact:
             if include:
@@ -114,28 +116,45 b' class match(object):'
     def __iter__(self):
         for f in self._files:
             yield f
+
+    # Callbacks related to how the matcher is used by dirstate.walk.
+    # Subscribers to these events must monkeypatch the matcher object.
     def bad(self, f, msg):
-        '''callback for each explicit file that can't be
-        found/accessed, with an error message
-        '''
+        '''Callback from dirstate.walk for each explicit file that can't be
+        found/accessed, with an error message.'''
         pass
-    # If this is set, it will be called when an explicitly listed directory is
-    # visited.
+
+    # If an explicitdir is set, it will be called when an explicitly listed
+    # directory is visited.
     explicitdir = None
-    # If this is set, it will be called when a directory discovered by recursive
-    # traversal is visited.
+
+    # If an traversedir is set, it will be called when a directory discovered
+    # by recursive traversal is visited.
     traversedir = None
-    def missing(self, f):
-        pass
-    def exact(self, f):
-        return f in self._fmap
+
     def rel(self, f):
+        '''Convert repo path back to path that is relative to cwd of matcher.'''
         return util.pathto(self._root, self._cwd, f)
+
     def files(self):
+        '''Explicitly listed files or patterns or roots:
+        if no patterns or .always(): empty list,
+        if exact: list exact files,
+        if not .anypats(): list all files and dirs,
+        else: optimal roots'''
         return self._files
+
+    def exact(self, f):
+        '''Returns True if f is in .files().'''
+        return f in self._fmap
+
     def anypats(self):
+        '''Matcher uses patterns or include/exclude.'''
         return self._anypats
+
     def always(self):
+        '''Matcher will match everything and .files() will be empty
+        - optimization might be possible and necessary.'''
         return self._always

 class exact(match):
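Note: as the added comment says, dirstate.walk subscribers override these callbacks by assignment rather than by subclassing. A typical caller-side sketch (badfn and the surrounding names are illustrative, not part of this change):

    m = match(repo.root, repo.getcwd(), pats)
    def badfn(f, msg):
        ui.warn('%s: %s\n' % (f, msg))  # report instead of silently ignoring
    m.bad = badfn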
@@ -191,21 +210,36 b' class narrowmatcher(match):'
     def bad(self, f, msg):
         self._matcher.bad(self._path + "/" + f, msg)

-def patkind(pat):
-    return _patsplit(pat, None)[0]
+def patkind(pattern, default=None):
+    '''If pattern is 'kind:pat' with a known kind, return kind.'''
+    return _patsplit(pattern, default)[0]

-def _patsplit(pat, default):
-    """Split a string into an optional pattern kind prefix and the
-    actual pattern."""
-    if ':' in pat:
-        kind, val = pat.split(':', 1)
+def _patsplit(pattern, default):
+    """Split a string into the optional pattern kind prefix and the actual
+    pattern."""
+    if ':' in pattern:
+        kind, pat = pattern.split(':', 1)
         if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
                     'listfile', 'listfile0', 'set'):
-            return kind, val
-    return default, pat
+            return kind, pat
+    return default, pattern

 def _globre(pat):
-    '''convert a glob pattern into a regexp'''
+    r'''Convert an extended glob string to a regexp string.
+
+    >>> print _globre(r'?')
+    .
+    >>> print _globre(r'*')
+    [^/]*
+    >>> print _globre(r'**')
+    .*
+    >>> print _globre(r'[a*?!^][^b][!c]')
+    [a*?!^][\^b][^c]
+    >>> print _globre(r'{a,b}')
+    (?:a|b)
+    >>> print _globre(r'.\*\?')
+    \.\*\?
+    '''
     i, n = 0, len(pat)
     res = ''
     group = 0
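Note: the new default parameter makes patkind usable for bare patterns as well; an illustrative session:

    >>> patkind('re:.*\.py$')
    're'
    >>> patkind('README')          # unknown kind, no default: returns None
    >>> patkind('README', 'glob')
    'glob'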
@@ -260,99 +294,115 b' def _globre(pat):'
             res += escape(c)
     return res

-def _regex(kind, name, tail):
-    '''convert a pattern into a regular expression'''
-    if not name:
+def _regex(kind, pat, globsuffix):
+    '''Convert a (normalized) pattern of any kind into a regular expression.
+    globsuffix is appended to the regexp of globs.'''
+    if not pat:
         return ''
     if kind == 're':
-        return name
-    elif kind == 'path':
-        return '^' + re.escape(name) + '(?:/|$)'
-    elif kind == 'relglob':
-        return '(?:|.*/)' + _globre(name) + tail
-    elif kind == 'relpath':
-        return re.escape(name) + '(?:/|$)'
-    elif kind == 'relre':
-        if name.startswith('^'):
-            return name
-        return '.*' + name
-    return _globre(name) + tail
+        return pat
+    if kind == 'path':
+        return '^' + re.escape(pat) + '(?:/|$)'
+    if kind == 'relglob':
+        return '(?:|.*/)' + _globre(pat) + globsuffix
+    if kind == 'relpath':
+        return re.escape(pat) + '(?:/|$)'
+    if kind == 'relre':
+        if pat.startswith('^'):
+            return pat
+        return '.*' + pat
+    return _globre(pat) + globsuffix

-def _buildmatch(ctx, pats, tail):
-    fset, pats = _expandsets(pats, ctx)
-    if not pats:
+def _buildmatch(ctx, kindpats, globsuffix):
+    '''Return regexp string and a matcher function for kindpats.
+    globsuffix is appended to the regexp of globs.'''
+    fset, kindpats = _expandsets(kindpats, ctx)
+    if not kindpats:
         return "", fset.__contains__

-    pat, mf = _buildregexmatch(pats, tail)
+    regex, mf = _buildregexmatch(kindpats, globsuffix)
     if fset:
-        return pat, lambda f: f in fset or mf(f)
-    return pat, mf
+        return regex, lambda f: f in fset or mf(f)
+    return regex, mf

-def _buildregexmatch(pats, tail):
-    """build a matching function from a set of patterns"""
+def _buildregexmatch(kindpats, globsuffix):
+    """Build a match function from a list of kinds and kindpats,
+    return regexp string and a matcher function."""
     try:
-        pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
-        if len(pat) > 20000:
+        regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
+                                     for (k, p) in kindpats])
+        if len(regex) > 20000:
             raise OverflowError
-        return pat, _rematcher(pat)
+        return regex, _rematcher(regex)
     except OverflowError:
         # We're using a Python with a tiny regex engine and we
         # made it explode, so we'll divide the pattern list in two
         # until it works
-        l = len(pats)
+        l = len(kindpats)
         if l < 2:
             raise
-        pata, a = _buildregexmatch(pats[:l//2], tail)
-        patb, b = _buildregexmatch(pats[l//2:], tail)
+        regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
+        regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
         return pat, lambda s: a(s) or b(s)
     except re.error:
-        for k, p in pats:
+        for k, p in kindpats:
             try:
-                _rematcher('(?:%s)' % _regex(k, p, tail))
+                _rematcher('(?:%s)' % _regex(k, p, globsuffix))
             except re.error:
                 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
         raise util.Abort(_("invalid pattern"))

-def _normalize(names, default, root, cwd, auditor):
-    pats = []
-    for kind, name in [_patsplit(p, default) for p in names]:
+def _normalize(patterns, default, root, cwd, auditor):
+    '''Convert 'kind:pat' from the patterns list to tuples with kind and
+    normalized and rooted patterns and with listfiles expanded.'''
+    kindpats = []
+    for kind, pat in [_patsplit(p, default) for p in patterns]:
         if kind in ('glob', 'relpath'):
-            name = scmutil.canonpath(root, cwd, name, auditor)
+            pat = pathutil.canonpath(root, cwd, pat, auditor)
         elif kind in ('relglob', 'path'):
-            name = util.normpath(name)
+            pat = util.normpath(pat)
         elif kind in ('listfile', 'listfile0'):
             try:
-                files = util.readfile(name)
+                files = util.readfile(pat)
                 if kind == 'listfile0':
                     files = files.split('\0')
                 else:
                     files = files.splitlines()
                 files = [f for f in files if f]
             except EnvironmentError:
-                raise util.Abort(_("unable to read file list (%s)") % name)
-            pats += _normalize(files, default, root, cwd, auditor)
+                raise util.Abort(_("unable to read file list (%s)") % pat)
+            kindpats += _normalize(files, default, root, cwd, auditor)
             continue
-
-        pats.append((kind, name))
-    return pats
+        # else: re or relre - which cannot be normalized
+        kindpats.append((kind, pat))
+    return kindpats

-def _roots(patterns):
+def _roots(kindpats):
+    '''return roots and exact explicitly listed files from patterns
+
+    >>> _roots([('glob', 'g/*'), ('glob', 'g'), ('glob', 'g*')])
+    ['g', 'g', '.']
+    >>> _roots([('relpath', 'r'), ('path', 'p/p'), ('path', '')])
+    ['r', 'p/p', '.']
+    >>> _roots([('relglob', 'rg*'), ('re', 're/'), ('relre', 'rr')])
+    ['.', '.', '.']
+    '''
     r = []
-    for kind, name in patterns:
+    for kind, pat in kindpats:
         if kind == 'glob': # find the non-glob prefix
             root = []
-            for p in name.split('/'):
+            for p in pat.split('/'):
                 if '[' in p or '{' in p or '*' in p or '?' in p:
                     break
                 root.append(p)
             r.append('/'.join(root) or '.')
         elif kind in ('relpath', 'path'):
-            r.append(name or '.')
+            r.append(pat or '.')
         else: # relglob, re, relre
             r.append('.')
     return r

-def _anypats(pats):
-    for kind, name in pats:
+def _anypats(kindpats):
+    for kind, pat in kindpats:
         if kind in ('glob', 're', 'relglob', 'relre', 'set'):
             return True
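Note: the OverflowError fallback keeps the same divide-and-conquer idea under the new names: when the joined pattern exceeds what Python's regex engine accepts, the kindpats list is halved and the two resulting matchers are OR-ed together. The composition is just a closure over the two halves, e.g.:

    import re
    a = re.compile('(?:foo.*)').match
    b = re.compile('(?:bar.*)').match
    combined = lambda s: a(s) or b(s)   # matches what either half matches
    assert combined('foofile') and combined('barfile')

One caveat worth flagging in review: the fallback still ends with "return pat, ..." even though pat no longer exists under the new naming, so that path would raise NameError if ever reached; returning something like regexa + '|' + regexb there would fix it.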
@@ -34,7 +34,7 b' class mergestate(object):'
     [type][length][content]

     Type is a single character, length is a 4 bytes integer, content is an
-    arbitrary suites of bytes of lengh `lengh`.
+    arbitrary suites of bytes of length `length`.

     Type should be a letter. Capital letter are mandatory record, Mercurial
     should abort if they are unknown. lower case record can be safely ignored.
@@ -47,10 +47,12 b' class mergestate(object):'
     '''
     statepathv1 = "merge/state"
     statepathv2 = "merge/state2"
+
     def __init__(self, repo):
         self._repo = repo
         self._dirty = False
         self._read()
+
     def reset(self, node=None, other=None):
         self._state = {}
         if node:
@@ -58,7 +60,13 b' class mergestate(object):'
             self._other = other
         shutil.rmtree(self._repo.join("merge"), True)
         self._dirty = False
+
     def _read(self):
+        """Analyse each record content to restore a serialized state from disk
+
+        This function process "record" entry produced by the de-serialization
+        of on disk file.
+        """
         self._state = {}
         records = self._readrecords()
         for rtype, record in records:
@@ -73,7 +81,21 b' class mergestate(object):'
                 raise util.Abort(_('unsupported merge state record: %s')
                                  % rtype)
         self._dirty = False
+
     def _readrecords(self):
+        """Read merge state from disk and return a list of record (TYPE, data)
+
+        We read data from both v1 and v2 files and decide which one to use.
+
+        V1 has been used by version prior to 2.9.1 and contains less data than
+        v2. We read both versions and check if no data in v2 contradicts
+        v1. If there is not contradiction we can safely assume that both v1
+        and v2 were written at the same time and use the extract data in v2. If
+        there is contradiction we ignore v2 content as we assume an old version
+        of Mercurial has overwritten the mergestate file and left an old v2
+        file around.
+
+        returns list of record [(TYPE, data), ...]"""
         v1records = self._readrecordsv1()
         v2records = self._readrecordsv2()
         oldv2 = set() # old format version of v2 record
@@ -101,7 +123,15 b' class mergestate(object):'
             return v1records
         else:
             return v2records
+
     def _readrecordsv1(self):
+        """read on disk merge state for version 1 file
+
+        returns list of record [(TYPE, data), ...]
+
+        Note: the "F" data from this file are one entry short
+        (no "other file node" entry)
+        """
         records = []
         try:
             f = self._repo.opener(self.statepathv1)
@@ -115,7 +145,12 b' class mergestate(object):'
             if err.errno != errno.ENOENT:
                 raise
         return records
+
     def _readrecordsv2(self):
+        """read on disk merge state for version 2 file
+
+        returns list of record [(TYPE, data), ...]
+        """
         records = []
         try:
             f = self._repo.opener(self.statepathv2)
@@ -125,17 +160,19 b' class mergestate(object):'
             while off < end:
                 rtype = data[off]
                 off += 1
-                lengh = _unpack('>I', data[off:(off + 4)])[0]
+                length = _unpack('>I', data[off:(off + 4)])[0]
                 off += 4
-                record = data[off:(off + lengh)]
-                off += lengh
+                record = data[off:(off + length)]
+                off += length
                 records.append((rtype, record))
             f.close()
         except IOError, err:
             if err.errno != errno.ENOENT:
                 raise
         return records
+
     def commit(self):
+        """Write current state on disk (if necessary)"""
         if self._dirty:
             records = []
             records.append(("L", hex(self._local)))
@@ -144,10 +181,14 b' class mergestate(object):'
                 records.append(("F", "\0".join([d] + v)))
             self._writerecords(records)
             self._dirty = False
+
     def _writerecords(self, records):
+        """Write current state on disk (both v1 and v2)"""
         self._writerecordsv1(records)
         self._writerecordsv2(records)
+
     def _writerecordsv1(self, records):
+        """Write current state on disk in a version 1 file"""
         f = self._repo.opener(self.statepathv1, "w")
         irecords = iter(records)
         lrecords = irecords.next()
@@ -157,14 +198,25 b' class mergestate(object):'
             if rtype == "F":
                 f.write("%s\n" % _droponode(data))
         f.close()
+
     def _writerecordsv2(self, records):
+        """Write current state on disk in a version 2 file"""
         f = self._repo.opener(self.statepathv2, "w")
         for key, data in records:
             assert len(key) == 1
             format = ">sI%is" % len(data)
             f.write(_pack(format, key, len(data), data))
         f.close()
+
     def add(self, fcl, fco, fca, fd):
+        """add a new (potentially?) conflicting file the merge state
+        fcl: file context for local,
+        fco: file context for remote,
+        fca: file context for ancestors,
+        fd: file path of the resulting merge.
+
+        note: also write the local version to the `.hg/merge` directory.
+        """
         hash = util.sha1(fcl.path()).hexdigest()
         self._repo.opener.write("merge/" + hash, fcl.data())
         self._state[fd] = ['u', hash, fcl.path(),
@@ -172,21 +224,28 b' class mergestate(object):'
                            fco.path(), hex(fco.filenode()),
                            fcl.flags()]
         self._dirty = True
+
     def __contains__(self, dfile):
         return dfile in self._state
+
     def __getitem__(self, dfile):
        return self._state[dfile][0]
+
     def __iter__(self):
         l = self._state.keys()
         l.sort()
         for f in l:
             yield f
+
     def files(self):
         return self._state.keys()
+
     def mark(self, dfile, state):
         self._state[dfile][0] = state
         self._dirty = True
+
     def resolve(self, dfile, wctx):
+        """rerun merge process for file path `dfile`"""
         if self[dfile] == 'r':
             return 0
         stateentry = self._state[dfile]
@@ -212,6 +271,7 b' class mergestate(object):'
         if r is None:
             # no real conflict
             del self._state[dfile]
+            self._dirty = True
         elif not r:
             self.mark(dfile, 'r')
         return r
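Note: the v2 record layout documented above ([type][length][content], packed with ">sI%is") round-trips with plain struct; a self-contained sketch (packrecord/unpackrecord are illustrative names, not part of this change):

    import struct

    def packrecord(rtype, data):
        # 1-byte type, 4-byte big-endian length, then the payload itself
        return struct.pack('>sI%is' % len(data), rtype, len(data), data)

    def unpackrecord(buf, off=0):
        rtype = buf[off]
        length = struct.unpack('>I', buf[off + 1:off + 5])[0]
        return rtype, buf[off + 5:off + 5 + length], off + 5 + length

    rec = packrecord('L', '0' * 40)
    assert unpackrecord(rec)[:2] == ('L', '0' * 40)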
@@ -263,7 +323,7 b' def _forgetremoved(wctx, mctx, branchmer' | |||||
263 |
|
323 | |||
264 | return actions |
|
324 | return actions | |
265 |
|
325 | |||
266 |
def _checkcollision(repo, wmf, actions |
|
326 | def _checkcollision(repo, wmf, actions): | |
267 | # build provisional merged manifest up |
|
327 | # build provisional merged manifest up | |
268 | pmmf = set(wmf) |
|
328 | pmmf = set(wmf) | |
269 |
|
329 | |||
@@ -274,20 +334,23 b' def _checkcollision(repo, wmf, actions, ' | |||||
274 | def nop(f, args): |
|
334 | def nop(f, args): | |
275 | pass |
|
335 | pass | |
276 |
|
336 | |||
277 | def renameop(f, args): |
|
337 | def renamemoveop(f, args): | |
278 |
f2 |
|
338 | f2, flags = args | |
279 | if f: |
|
339 | pmmf.discard(f2) | |
280 |
|
|
340 | pmmf.add(f) | |
281 | pmmf.add(fd) |
|
341 | def renamegetop(f, args): | |
|
342 | f2, flags = args | |||
|
343 | pmmf.add(f) | |||
282 | def mergeop(f, args): |
|
344 | def mergeop(f, args): | |
283 |
f2, f |
|
345 | f1, f2, fa, move, anc = args | |
284 | if move: |
|
346 | if move: | |
285 | pmmf.discard(f) |
|
347 | pmmf.discard(f1) | |
286 |
pmmf.add(f |
|
348 | pmmf.add(f) | |
287 |
|
349 | |||
288 | opmap = { |
|
350 | opmap = { | |
289 | "a": addop, |
|
351 | "a": addop, | |
290 | "d": renameop, |
|
352 | "dm": renamemoveop, | |
|
353 | "dg": renamegetop, | |||
291 | "dr": nop, |
|
354 | "dr": nop, | |
292 | "e": nop, |
|
355 | "e": nop, | |
293 | "f": addop, # untracked file should be kept in working directory |
|
356 | "f": addop, # untracked file should be kept in working directory | |
@@ -295,21 +358,14 b' def _checkcollision(repo, wmf, actions, ' | |||||
295 | "m": mergeop, |
|
358 | "m": mergeop, | |
296 | "r": removeop, |
|
359 | "r": removeop, | |
297 | "rd": nop, |
|
360 | "rd": nop, | |
|
361 | "cd": addop, | |||
|
362 | "dc": addop, | |||
298 | } |
|
363 | } | |
299 | for f, m, args, msg in actions: |
|
364 | for f, m, args, msg in actions: | |
300 | op = opmap.get(m) |
|
365 | op = opmap.get(m) | |
301 | assert op, m |
|
366 | assert op, m | |
302 | op(f, args) |
|
367 | op(f, args) | |
303 |
|
368 | |||
304 | opmap = { |
|
|||
305 | "cd": addop, |
|
|||
306 | "dc": addop, |
|
|||
307 | } |
|
|||
308 | for f, m in prompts: |
|
|||
309 | op = opmap.get(m) |
|
|||
310 | assert op, m |
|
|||
311 | op(f, None) |
|
|||
312 |
|
||||
313 | # check case-folding collision in provisional merged manifest |
|
369 | # check case-folding collision in provisional merged manifest | |
314 | foldmap = {} |
|
370 | foldmap = {} | |
315 | for f in sorted(pmmf): |
|
371 | for f in sorted(pmmf): | |
@@ -320,7 +376,7 b' def _checkcollision(repo, wmf, actions, ' | |||||
320 | foldmap[fold] = f |
|
376 | foldmap[fold] = f | |
321 |
|
377 | |||
322 | def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial, |
|
378 | def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial, | |
323 |
acceptremote |
|
379 | acceptremote, followcopies): | |
324 | """ |
|
380 | """ | |
325 | Merge p1 and p2 with ancestor pa and generate merge action list |
|
381 | Merge p1 and p2 with ancestor pa and generate merge action list | |
326 |
|
382 | |||
@@ -329,19 +385,8 b' def manifestmerge(repo, wctx, p2, pa, br' | |||||
329 | acceptremote = accept the incoming changes without prompting |
|
385 | acceptremote = accept the incoming changes without prompting | |
330 | """ |
|
386 | """ | |
331 |
|
387 | |||
332 | overwrite = force and not branchmerge |
|
|||
333 | actions, copy, movewithdir = [], {}, {} |
|
388 | actions, copy, movewithdir = [], {}, {} | |
334 |
|
389 | |||
335 | followcopies = False |
|
|||
336 | if overwrite: |
|
|||
337 | pa = wctx |
|
|||
338 | elif pa == p2: # backwards |
|
|||
339 | pa = wctx.p1() |
|
|||
340 | elif not branchmerge and not wctx.dirty(missing=True): |
|
|||
341 | pass |
|
|||
342 | elif pa and repo.ui.configbool("merge", "followcopies", True): |
|
|||
343 | followcopies = True |
|
|||
344 |
|
||||
345 | # manifests fetched in order are going to be faster, so prime the caches |
|
390 | # manifests fetched in order are going to be faster, so prime the caches | |
346 | [x.manifest() for x in |
|
391 | [x.manifest() for x in | |
347 | sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())] |
|
392 | sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())] | |
@@ -370,7 +415,7 @@ def manifestmerge(repo, wctx, p2, pa, br
                 m1['.hgsubstate'] += "+"
                 break
 
-    aborts, prompts = [], []
+    aborts = []
     # Compare manifests
     fdiff = dicthelpers.diff(m1, m2)
     flagsdiff = m1.flagsdiff(m2)
@@ -395,11 +440,16 @@ def manifestmerge(repo, wctx, p2, pa, br
         if partial and not partial(f):
             continue
         if n1 and n2:
-            fla = ma.flags(f)
-            nol = 'l' not in fl1 + fl2 + fla
+            fa = f
             a = ma.get(f, nullid)
+            if a == nullid:
+                fa = copy.get(f, f)
+                # Note: f as default is wrong - we can't really make a 3-way
+                # merge without an ancestor file.
+            fla = ma.flags(fa)
+            nol = 'l' not in fl1 + fl2 + fla
             if n2 == a and fl2 == fla:
-                pass # remote unchanged - keep local
+                actions.append((f, "k", (), "keep")) # remote unchanged
             elif n1 == a and fl1 == fla: # local unchanged - use remote
                 if n1 == n2: # optimization: keep local content
                     actions.append((f, "e", (fl2,), "update permissions"))
@@ -410,36 +460,40 @@ def manifestmerge(repo, wctx, p2, pa, br
             elif nol and n1 == a: # local only changed 'x'
                 actions.append((f, "g", (fl1,), "remote is newer"))
             else: # both changed something
-                actions.append((f, "m", (f, f, False), "versions differ"))
+                actions.append((f, "m", (f, f, fa, False, pa.node()),
+                                "versions differ"))
         elif f in copied: # files we'll deal with on m2 side
             pass
-        elif n1 and f in movewithdir: # directory rename
+        elif n1 and f in movewithdir: # directory rename, move local
             f2 = movewithdir[f]
-            actions.append((f, "d", (None, f2, fl1),
-                            "remote renamed directory to " + f2))
+            actions.append((f2, "dm", (f, fl1),
+                            "remote directory rename - move from " + f))
         elif n1 and f in copy:
             f2 = copy[f]
-            actions.append((f, "m", (f2, f, False),
-                            "local copied/moved to " + f2))
+            actions.append((f, "m", (f, f2, f2, False, pa.node()),
+                            "local copied/moved from " + f2))
         elif n1 and f in ma: # clean, a different, no remote
             if n1 != ma[f]:
-                prompts.append((f, "cd")) # prompt changed/deleted
+                if acceptremote:
+                    actions.append((f, "r", None, "remote delete"))
+                else:
+                    actions.append((f, "cd", None, "prompt changed/deleted"))
             elif n1[20:] == "a": # added, no remote
                 actions.append((f, "f", None, "remote deleted"))
             else:
                 actions.append((f, "r", None, "other deleted"))
         elif n2 and f in movewithdir:
             f2 = movewithdir[f]
-            actions.append((None, "d", (f, f2, fl2),
-                            "local renamed directory to " + f2))
+            actions.append((f2, "dg", (f, fl2),
+                            "local directory rename - get from " + f))
         elif n2 and f in copy:
             f2 = copy[f]
             if f2 in m2:
-                actions.append((f2, "m", (f, f2, False),
-                                "remote copied to " + f))
+                actions.append((f, "m", (f2, f, f2, False, pa.node()),
+                                "remote copied from " + f2))
             else:
-                actions.append((f2, "m", (f, f2, True),
-                                "remote moved to " + f))
+                actions.append((f, "m", (f2, f, f2, True, pa.node()),
+                                "remote moved from " + f2))
         elif n2 and f not in ma:
             # local unknown, remote created: the logic is described by the
             # following table:
@@ -458,7 +512,8 @@ def manifestmerge(repo, wctx, p2, pa, br
             else:
                 different = _checkunknownfile(repo, wctx, p2, f)
                 if force and branchmerge and different:
-                    actions.append((f, "m", (f, f, False),
+                    # FIXME: This is wrong - f is not in ma ...
+                    actions.append((f, "m", (f, f, f, False, pa.node()),
                                     "remote differs from untracked local"))
                 elif not force and different:
                     aborts.append((f, "ud"))
@@ -470,7 +525,12 @@ def manifestmerge(repo, wctx, p2, pa, br
                 aborts.append((f, "ud"))
             else:
                 # if different: old untracked f may be overwritten and lost
-                prompts.append((f, "dc")) # prompt deleted/changed
+                if acceptremote:
+                    actions.append((f, "g", (m2.flags(f),),
+                                    "remote recreating"))
+                else:
+                    actions.append((f, "dc", (m2.flags(f),),
+                                    "prompt deleted/changed"))
 
     for f, m in sorted(aborts):
         if m == "ud":
@@ -484,30 +544,10 @@ def manifestmerge(repo, wctx, p2, pa, br
     # check collision between files only in p2 for clean update
     if (not branchmerge and
         (force or not wctx.dirty(missing=True, branch=False))):
-        _checkcollision(repo, m2, [], [])
+        _checkcollision(repo, m2, [])
     else:
-        _checkcollision(repo, m1, actions, prompts)
+        _checkcollision(repo, m1, actions)
 
-    for f, m in sorted(prompts):
-        if m == "cd":
-            if acceptremote:
-                actions.append((f, "r", None, "remote delete"))
-            elif repo.ui.promptchoice(
-                _("local changed %s which remote deleted\n"
-                  "use (c)hanged version or (d)elete?"
-                  "$$ &Changed $$ &Delete") % f, 0):
-                actions.append((f, "r", None, "prompt delete"))
-            else:
-                actions.append((f, "a", None, "prompt keep"))
-        elif m == "dc":
-            if acceptremote:
-                actions.append((f, "g", (m2.flags(f),), "remote recreating"))
-            elif repo.ui.promptchoice(
-                _("remote changed %s which local deleted\n"
-                  "use (c)hanged version or leave (d)eleted?"
-                  "$$ &Changed $$ &Deleted") % f, 0) == 0:
-                actions.append((f, "g", (m2.flags(f),), "prompt recreating"))
-        else: assert False, m
     return actions
 
 def actionkey(a):
@@ -549,12 +589,11 @@ def getremove(repo, mctx, overwrite, arg
         if i > 0:
             yield i, f
 
-def applyupdates(repo, actions, wctx, mctx, actx, overwrite):
+def applyupdates(repo, actions, wctx, mctx, overwrite):
     """apply the merge action list to the working directory
 
     wctx is the working copy context
     mctx is the context to be merged into the working copy
-    actx is the context of the common ancestor
 
     Return a tuple of counts (updated, merged, removed, unresolved) that
     describes how many files were affected by the update.
@@ -571,24 +610,20 @@ def applyupdates(repo, actions, wctx, mc
         f, m, args, msg = a
         repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
         if m == "m": # merge
-            f2, fd, move = args
-            if fd == '.hgsubstate': # merged internally
+            f1, f2, fa, move, anc = args
+            if f == '.hgsubstate': # merged internally
                 continue
-            repo.ui.debug(" preserving %s for resolve of %s\n" % (f, fd))
-            fcl = wctx[f]
+            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
+            fcl = wctx[f1]
             fco = mctx[f2]
-            if mctx == actx: # backwards, use working dir parent as ancestor
-                if fcl.parents():
-                    fca = fcl.p1()
-                else:
-                    fca = repo.filectx(f, fileid=nullrev)
+            actx = repo[anc]
+            if fa in actx:
+                fca = actx[fa]
             else:
-                fca = fcl.ancestor(fco, actx)
-            if not fca:
-                fca = repo.filectx(f, fileid=nullrev)
-            ms.add(fcl, fco, fca, fd)
-            if f != fd and move:
-                moves.append(f)
+                fca = repo.filectx(f1, fileid=nullrev)
+            ms.add(fcl, fco, fca, f)
+            if f1 != f and move:
+                moves.append(f1)
 
     audit = repo.wopener.audit
 
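
Since the merge action now carries the ancestor node, applyupdates resolves the ancestor context itself and falls back to a null filectx when fa is missing. A toy model of the lookup, with dicts standing in for the repository and contexts (all names here are illustrative):

    NULLFCA = object()  # stands in for repo.filectx(f1, fileid=nullrev)

    def ancestorfilectx(repos, anc, fa):
        actx = repos[anc]  # map: node -> {filename: filectx}
        if fa in actx:
            return actx[fa]
        return NULLFCA

    repos = {"n1": {"a": "a@n1"}}
    assert ancestorfilectx(repos, "n1", "a") == "a@n1"
    assert ancestorfilectx(repos, "n1", "b") is NULLFCA
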
@@ -599,13 +634,13 @@ def applyupdates(repo, actions, wctx, mc
             audit(f)
             util.unlinkpath(repo.wjoin(f))
 
-    numupdates = len(actions)
+    numupdates = len([a for a in actions if a[1] != 'k'])
     workeractions = [a for a in actions if a[1] in 'gr']
     updateactions = [a for a in workeractions if a[1] == 'g']
     updated = len(updateactions)
     removeactions = [a for a in workeractions if a[1] == 'r']
     removed = len(removeactions)
-    actions = [a for a in actions if a[1] not in 'gr']
+    actions = [a for a in actions if a[1] not in 'grk']
 
     hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
     if hgsub and hgsub[0] == 'r':
@@ -636,13 +671,13 @@ def applyupdates(repo, actions, wctx, mc
         f, m, args, msg = a
         progress(_updating, z + i + 1, item=f, total=numupdates, unit=_files)
         if m == "m": # merge
-            f2, fd, move = args
-            if fd == '.hgsubstate': # subrepo states need updating
+            f1, f2, fa, move, anc = args
+            if f == '.hgsubstate': # subrepo states need updating
                 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                  overwrite)
                 continue
-            audit(fd)
-            r = ms.resolve(fd, wctx)
+            audit(f)
+            r = ms.resolve(f, wctx)
             if r is not None and r > 0:
                 unresolved += 1
             else:
@@ -650,16 +685,17 @@ def applyupdates(repo, actions, wctx, mc
                     updated += 1
                 else:
                     merged += 1
-        elif m == "d": # directory rename
-            f2, fd, flags = args
-            if f:
-                repo.ui.note(_("moving %s to %s\n") % (f, fd))
-                audit(fd)
-                repo.wwrite(fd, wctx.filectx(f).data(), flags)
-                util.unlinkpath(repo.wjoin(f))
-            if f2:
-                repo.ui.note(_("getting %s to %s\n") % (f2, fd))
-                repo.wwrite(fd, mctx.filectx(f2).data(), flags)
+        elif m == "dm": # directory rename, move local
+            f0, flags = args
+            repo.ui.note(_("moving %s to %s\n") % (f0, f))
+            audit(f)
+            repo.wwrite(f, wctx.filectx(f0).data(), flags)
+            util.unlinkpath(repo.wjoin(f0))
+            updated += 1
+        elif m == "dg": # local directory rename, get
+            f0, flags = args
+            repo.ui.note(_("getting %s to %s\n") % (f0, f))
+            repo.wwrite(f, mctx.filectx(f0).data(), flags)
             updated += 1
         elif m == "dr": # divergent renames
             fl, = args
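
The two-sided "d" action is gone: "dm" moves a local file into a remotely renamed directory, and "dg" fetches a remote file into a locally renamed one. A toy rendition with a dict as the working directory (the apply_* helpers are illustrative stand-ins, not the real vfs/wwrite API):

    wd = {"dir-a/f": "data"}  # pretend working directory

    def apply_dm(f, f0):
        # remote renamed the directory: move the local file into it
        wd[f] = wd.pop(f0)

    def apply_dg(f, f0, remote):
        # local renamed the directory: fetch the remote file into it
        wd[f] = remote[f0]

    apply_dm("dir-b/f", "dir-a/f")
    apply_dg("dir-b/g", "dir-a/g", {"dir-a/g": "remote data"})
    assert wd == {"dir-b/f": "data", "dir-b/g": "remote data"}
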
@@ -683,17 +719,104 @@ def applyupdates(repo, actions, wctx, mc
 
     return updated, merged, removed, unresolved
 
-def calculateupdates(repo, tctx, mctx, ancestor, branchmerge, force, partial,
-                     acceptremote=False):
-    "Calculate the actions needed to merge mctx into tctx"
-    actions = []
-    actions += manifestmerge(repo, tctx, mctx,
-                             ancestor,
-                             branchmerge, force,
-                             partial, acceptremote)
-    if tctx.rev() is None:
-        actions += _forgetremoved(tctx, mctx, branchmerge)
-    return actions
+def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
+                     acceptremote, followcopies):
+    "Calculate the actions needed to merge mctx into wctx using ancestors"
+
+    if len(ancestors) == 1: # default
+        actions = manifestmerge(repo, wctx, mctx, ancestors[0],
+                                branchmerge, force,
+                                partial, acceptremote, followcopies)
+
+    else: # only when merge.preferancestor=* - experimentalish code
+        # Call for bids
+        fbids = {} # mapping filename to list of action bids
+        for ancestor in ancestors:
+            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
+            actions = manifestmerge(repo, wctx, mctx, ancestor,
+                                    branchmerge, force,
+                                    partial, acceptremote, followcopies)
+            for a in sorted(actions):
+                repo.ui.debug(' %s: %s\n' % (a[0], a[1]))
+                f = a[0]
+                if f in fbids:
+                    fbids[f].append(a)
+                else:
+                    fbids[f] = [a]
+
+        # Pick the best bid for each file
+        repo.ui.note(_('\nauction for merging merge bids\n'))
+        actions = []
+        for f, bidsl in sorted(fbids.items()):
+            # Consensus?
+            a0 = bidsl[0]
+            if util.all(a == a0 for a in bidsl[1:]): # len(bidsl) is > 1
+                repo.ui.note(" %s: consensus for %s\n" % (f, a0[1]))
+                actions.append(a0)
+                continue
+            # Group bids by kind of action
+            bids = {}
+            for a in bidsl:
+                m = a[1]
+                if m in bids:
+                    bids[m].append(a)
+                else:
+                    bids[m] = [a]
+            # If keep is an option, just do it.
+            if "k" in bids:
+                repo.ui.note(" %s: picking 'keep' action\n" % f)
+                actions.append(bids["k"][0])
+                continue
+            # If all gets agree [how could they not?], just do it.
+            if "g" in bids:
+                ga0 = bids["g"][0]
+                if util.all(a == ga0 for a in bids["g"][1:]):
+                    repo.ui.note(" %s: picking 'get' action\n" % f)
+                    actions.append(ga0)
+                    continue
+            # TODO: Consider other simple actions such as mode changes
+            # Handle inefficient democrazy.
+            repo.ui.note(_(' %s: multiple merge bids:\n') % f)
+            for a in bidsl:
+                repo.ui.note(' %s: %s\n' % (f, a[1]))
+            # Pick random action. TODO: Instead, prompt user when resolving
+            a0 = bidsl[0]
+            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
+                         (f, a0[1]))
+            actions.append(a0)
+            continue
+        repo.ui.note(_('end of auction\n\n'))
+
+    # Filter out prompts.
+    newactions, prompts = [], []
+    for a in actions:
+        if a[1] in ("cd", "dc"):
+            prompts.append(a)
+        else:
+            newactions.append(a)
+    # Prompt and create actions. TODO: Move this towards resolve phase.
+    for f, m, args, msg in sorted(prompts):
+        if m == "cd":
+            if repo.ui.promptchoice(
+                _("local changed %s which remote deleted\n"
+                  "use (c)hanged version or (d)elete?"
+                  "$$ &Changed $$ &Delete") % f, 0):
+                newactions.append((f, "r", None, "prompt delete"))
+            else:
+                newactions.append((f, "a", None, "prompt keep"))
+        elif m == "dc":
+            flags, = args
+            if repo.ui.promptchoice(
+                _("remote changed %s which local deleted\n"
+                  "use (c)hanged version or leave (d)eleted?"
+                  "$$ &Changed $$ &Deleted") % f, 0) == 0:
+                newactions.append((f, "g", (flags,), "prompt recreating"))
+        else: assert False, m
+
+    if wctx.rev() is None:
+        newactions += _forgetremoved(wctx, mctx, branchmerge)
+
+    return newactions
 
 def recordupdates(repo, actions, branchmerge):
     "record merge actions to the dirstate"
@@ -712,50 +835,55 @@ def recordupdates(repo, actions, branchm
             repo.dirstate.drop(f)
         elif m == "e": # exec change
             repo.dirstate.normallookup(f)
+        elif m == "k": # keep
+            pass
         elif m == "g": # get
             if branchmerge:
                 repo.dirstate.otherparent(f)
             else:
                 repo.dirstate.normal(f)
         elif m == "m": # merge
-            f2, fd, move = args
+            f1, f2, fa, move, anc = args
             if branchmerge:
                 # We've done a branch merge, mark this file as merged
                 # so that we properly record the merger later
-                repo.dirstate.merge(fd)
-                if f != f2: # copy/rename
+                repo.dirstate.merge(f)
+                if f1 != f2: # copy/rename
                     if move:
-                        repo.dirstate.remove(f)
-                    if f != fd:
-                        repo.dirstate.copy(f, fd)
+                        repo.dirstate.remove(f1)
+                    if f1 != f:
+                        repo.dirstate.copy(f1, f)
                     else:
-                        repo.dirstate.copy(f2, fd)
+                        repo.dirstate.copy(f2, f)
             else:
                 # We've update-merged a locally modified file, so
                 # we set the dirstate to emulate a normal checkout
                 # of that file some time in the past. Thus our
                 # merge will appear as a normal local file
                 # modification.
-                if f2 == fd: # file not locally copied/moved
-                    repo.dirstate.normallookup(fd)
+                if f2 == f: # file not locally copied/moved
+                    repo.dirstate.normallookup(f)
                 if move:
-                    repo.dirstate.drop(f)
-        elif m == "d": # directory rename
-            f2, fd, flags = args
-            if not f2 and f not in repo.dirstate:
+                    repo.dirstate.drop(f1)
+        elif m == "dm": # directory rename, move local
+            f0, flag = args
+            if f0 not in repo.dirstate:
                 # untracked file moved
                 continue
             if branchmerge:
-                repo.dirstate.add(fd)
-                if f:
-                    repo.dirstate.remove(f)
-                    repo.dirstate.copy(f, fd)
-                if f2:
-                    repo.dirstate.copy(f2, fd)
+                repo.dirstate.add(f)
+                repo.dirstate.remove(f0)
+                repo.dirstate.copy(f0, f)
             else:
-                repo.dirstate.normal(fd)
-                if f:
-                    repo.dirstate.drop(f)
+                repo.dirstate.normal(f)
+                repo.dirstate.drop(f0)
+        elif m == "dg": # directory rename, get
+            f0, flag = args
+            if branchmerge:
+                repo.dirstate.add(f)
+                repo.dirstate.copy(f0, f)
+            else:
+                repo.dirstate.normal(f)
 
 def update(repo, node, branchmerge, force, partial, ancestor=None,
            mergeancestor=False):
@@ -808,9 +936,9 @@ def update(repo, node, branchmerge, forc
     wc = repo[None]
     pl = wc.parents()
     p1 = pl[0]
-    pa = None
+    pas = [None]
     if ancestor:
-        pa = repo[ancestor]
+        pas = [repo[ancestor]]
 
     if node is None:
         # Here is where we should consider bookmarks, divergent bookmarks,
@@ -849,13 +977,17 @@ def update(repo, node, branchmerge, forc
                 # get the max revision for the given successors set,
                 # i.e. the 'tip' of a set
                 node = repo.revs("max(%ln)", successors)[0]
-                pa = p1
+                pas = [p1]
 
     overwrite = force and not branchmerge
 
     p2 = repo[node]
-    if pa is None:
-        pa = p1.ancestor(p2)
+    if pas[0] is None:
+        if repo.ui.config("merge", "preferancestor") == '*':
+            cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
+            pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
+        else:
+            pas = [p1.ancestor(p2)]
 
     fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
 
@@ -863,10 +995,10 @@ def update(repo, node, branchmerge, forc
     if not overwrite and len(pl) > 1:
         raise util.Abort(_("outstanding uncommitted merges"))
     if branchmerge:
-        if pa == p2:
+        if pas == [p2]:
             raise util.Abort(_("merging with a working directory ancestor"
                                " has no effect"))
-        elif pa == p1:
+        elif pas == [p1]:
             if not mergeancestor and p1.branch() == p2.branch():
                 raise util.Abort(_("nothing to merge"),
                                  hint=_("use 'hg update' "
@@ -886,7 +1018,7 @@ def update(repo, node, branchmerge, forc
             repo.hook('update', parent1=xp2, parent2='', error=0)
             return 0, 0, 0, 0
 
-    if pa not in (p1, p2): # nonlinear
+    if pas not in ([p1], [p2]): # nonlinear
         dirty = wc.dirty(missing=True)
         if dirty or onode is None:
             # Branching is a bit strange to ensure we do the minimal
@@ -894,7 +1026,7 @@ def update(repo, node, branchmerge, forc
             foreground = obsolete.foreground(repo, [p1.node()])
             # note: the <node> variable contains a random identifier
             if repo[node].node() in foreground:
-                pa = p1 # allow updating to successors
+                pas = [p1] # allow updating to successors
             elif dirty:
                 msg = _("uncommitted changes")
                 if onode is None:
@@ -910,11 +1042,21 @@ def update(repo, node, branchmerge, forc
             raise util.Abort(msg, hint=hint)
         else:
             # Allow jumping branches if clean and specific rev given
-            pa = p1
+            pas = [p1]
+
+    followcopies = False
+    if overwrite:
+        pas = [wc]
+    elif pas == [p2]: # backwards
+        pas = [wc.p1()]
+    elif not branchmerge and not wc.dirty(missing=True):
+        pass
+    elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
+        followcopies = True
 
     ### calculate phase
-    actions = calculateupdates(repo, wc, p2, pa,
-                               branchmerge, force, partial, mergeancestor)
+    actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
+                               partial, mergeancestor, followcopies)
 
     ### apply phase
     if not branchmerge: # just jump to the new rev
@@ -924,7 +1066,7 @@ def update(repo, node, branchmerge, forc
         # note that we're in the middle of an update
         repo.vfs.write('updatestate', p2.hex())
 
-        stats = applyupdates(repo, actions, wc, p2, pa, overwrite)
+        stats = applyupdates(repo, actions, wc, p2, overwrite)
 
         if not partial:
             repo.setparents(fp1, fp2)
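
Bid merge only engages when merge.preferancestor is set to '*'; otherwise a single greatest common ancestor is chosen as before. A sketch of the selection with the changelog query and configuration modeled as plain callables (all names here are stand-ins):

    def pickancestors(p1, p2, preferall, commonancestorsheads, gca):
        cahs = commonancestorsheads(p1, p2)
        if preferall:  # merge.preferancestor=* enables bid merge
            return sorted(cahs)
        return [gca(p1, p2)]  # classic single greatest common ancestor

    # criss-cross history: two common ancestor heads, revs 10 and 11
    cahs = lambda a, b: [11, 10]
    gca = lambda a, b: 11
    assert pickancestors(12, 13, True, cahs, gca) == [10, 11]
    assert pickancestors(12, 13, False, cahs, gca) == [11]
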
@@ -73,7 +73,7 @@ def findblocks(text):
         if lines:
             indent = min((len(l) - len(l.lstrip())) for l in lines)
             lines = [l[indent:] for l in lines]
-            blocks.append(dict(indent=indent, lines=lines))
+            blocks.append({'indent': indent, 'lines': lines})
     return blocks
 
 def findliteralblocks(blocks):
@@ -109,7 +109,7 @@ def findliteralblocks(blocks):
         elif len(blocks[i]['lines']) == 1 and \
              blocks[i]['lines'][0].lstrip(' ').startswith('.. ') and \
              blocks[i]['lines'][0].find(' ', 3) == -1:
-            # directive on its o...
+            # directive on its own line, not a literal block
             i += 1
             continue
         else:
@@ -174,8 +174,8 @@ def splitparagraphs(blocks):
         items = []
         for j, line in enumerate(lines):
             if match(lines, j, itemre, singleline):
-                items.append(dict(type=type, lines=[],
-                                  indent=blocks[i]['indent']))
+                items.append({'type': type, 'lines': [],
+                              'indent': blocks[i]['indent']})
             items[-1]['lines'].append(line)
         blocks[i:i + 1] = items
         break
@@ -382,10 +382,10 @@ def addmargins(blocks):
               blocks[i]['type'] in ('bullet', 'option', 'field')):
             i += 1
         elif not blocks[i - 1]['lines']:
-            # no lines in previous block, do not sep...
+            # no lines in previous block, do not separate
            i += 1
         else:
-            blocks.insert(i, dict(lines=[''], indent=0, type='margin'))
+            blocks.insert(i, {'lines': [''], 'indent': 0, 'type': 'margin'})
             i += 2
     return blocks
 
@@ -697,6 +697,10 @@ def maketable(data, indent=0, header=Fal
     for row in data:
         l = []
         for w, v in zip(widths, row):
+            if '\n' in v:
+                # only remove line breaks and indentation, long lines are
+                # handled by the next tool
+                v = ' '.join(e.lstrip() for e in v.split('\n'))
             pad = ' ' * (w - encoding.colwidth(v))
             l.append(v + pad)
         out.append(indent + ' '.join(l) + "\n")
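
The maketable hunk flattens embedded newlines in a cell before padding and leaves long-line wrapping to the next tool. The same transformation in isolation:

    def flattencell(v):
        if '\n' in v:
            # only remove line breaks and indentation, long lines are
            # handled by the next tool
            v = ' '.join(e.lstrip() for e in v.split('\n'))
        return v

    assert flattencell("multi\n  line\n  cell") == "multi line cell"
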
@@ -176,7 +176,7 @@ def encodemeta(meta):
         if ':' in key or '\0' in key:
             raise ValueError("':' and '\0' are forbidden in metadata key'")
         if '\0' in value:
-            raise ValueError("':' are forbidden in metadata value'")
+            raise ValueError("':' is forbidden in metadata value'")
     return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
 
 def decodemeta(data):
@@ -247,6 +247,9 @@ class obsstore(object):
     def __iter__(self):
         return iter(self._all)
 
+    def __len__(self):
+        return len(self._all)
+
     def __nonzero__(self):
         return bool(self._all)
 
@@ -256,6 +259,12 @@ class obsstore(object):
         * ensuring it is hashable
         * check mandatory metadata
         * encode metadata
+
+        If you are a human writing code that creates markers, you want to
+        use the `createmarkers` function in this module instead.
+
+        Return True if a new marker has been added, False if the markers
+        already existed (no op).
         """
         if metadata is None:
             metadata = {}
@@ -267,7 +276,7 @@ class obsstore(object):
             if len(succ) != 20:
                 raise ValueError(succ)
         marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
-        self.add(transaction, [marker])
+        return bool(self.add(transaction, [marker]))
 
     def add(self, transaction, markers):
         """Add new markers to the store
@@ -343,14 +352,15 @@ def _encodeonemarker(marker):
 # - the base85 encoding
 _maxpayload = 5300
 
-def listmarkers(repo):
-    """List markers over pushkey"""
-    if not repo.obsstore:
-        return {}
+def _pushkeyescape(markers):
+    """encode markers into a dict suitable for pushkey exchange
+
+    - binary data is base85 encoded
+    - split in chunks smaller than 5300 bytes"""
     keys = {}
     parts = []
     currentlen = _maxpayload * 2 # ensure we create a new part
-    for marker in repo.obsstore:
+    for marker in markers:
         nextdata = _encodeonemarker(marker)
         if (len(nextdata) + currentlen > _maxpayload):
             currentpart = []
@@ -363,13 +373,19 @@ def listmarkers(repo):
         keys['dump%i' % idx] = base85.b85encode(data)
     return keys
 
+def listmarkers(repo):
+    """List markers over pushkey"""
+    if not repo.obsstore:
+        return {}
+    return _pushkeyescape(repo.obsstore)
+
 def pushmarker(repo, key, old, new):
     """Push markers over pushkey"""
     if not key.startswith('dump'):
         repo.ui.warn(_('unknown key: %r') % key)
         return 0
     if old:
-        repo.ui.warn(_('unexpected old value') % key)
+        repo.ui.warn(_('unexpected old value for %r') % key)
         return 0
     data = base85.b85decode(new)
     lock = repo.lock()
@@ -384,43 +400,6 @@ def pushmarker(repo, key, old, new):
     finally:
         lock.release()
 
-def syncpush(repo, remote):
-    """utility function to push obsolete markers to a remote
-
-    Exist mostly to allow overriding for experimentation purpose"""
-    if (_enabled and repo.obsstore and
-        'obsolete' in remote.listkeys('namespaces')):
-        rslts = []
-        remotedata = repo.listkeys('obsolete')
-        for key in sorted(remotedata, reverse=True):
-            # reverse sort to ensure we end with dump0
-            data = remotedata[key]
-            rslts.append(remote.pushkey('obsolete', key, '', data))
-        if [r for r in rslts if not r]:
-            msg = _('failed to push some obsolete markers!\n')
-            repo.ui.warn(msg)
-
-def syncpull(repo, remote, gettransaction):
-    """utility function to pull obsolete markers from a remote
-
-    The `gettransaction` is function that return the pull transaction, creating
-    one if necessary. We return the transaction to inform the calling code that
-    a new transaction have been created (when applicable).
-
-    Exists mostly to allow overriding for experimentation purpose"""
-    tr = None
-    if _enabled:
-        repo.ui.debug('fetching remote obsolete markers\n')
-        remoteobs = remote.listkeys('obsolete')
-        if 'dump0' in remoteobs:
-            tr = gettransaction()
-            for key in sorted(remoteobs, reverse=True):
-                if key.startswith('dump'):
-                    data = base85.b85decode(remoteobs[key])
-                    repo.obsstore.mergemarkers(tr, data)
-            repo.invalidatevolatilesets()
-    return tr
-
 def allmarkers(repo):
     """all obsolete markers known in a repository"""
     for markerdata in repo.obsstore:
@@ -673,7 +652,7 @@ def successorssets(repo, initialnode, ca
         # Within a marker, a successor may have divergent successors
         # sets. In such a case, the marker will contribute multiple
         # divergent successors sets. If multiple successors have
-        # divergent successors sets, a cartesian product is used.
+        # divergent successors sets, a Cartesian product is used.
         #
         # At the end we post-process successors sets to remove
         # duplicated entry and successors set that are strict subset of
@@ -800,7 +779,7 @@ def _computeextinctset(repo):
 def _computebumpedset(repo):
     """the set of revs trying to obsolete public revisions"""
     bumped = set()
-    # utils function (avoid attribute lookup in the loop)
+    # util function (avoid attribute lookup in the loop)
     phase = repo._phasecache.phase # would be faster to grab the full list
     public = phases.public
     cl = repo.changelog
@@ -845,8 +824,10 @@ def _computedivergentset(repo):
 def createmarkers(repo, relations, flag=0, metadata=None):
     """Add obsolete markers between changesets in a repo
 
-    <relations> must be an iterable of (<old>, (<new>, ...)) tuple.
-    `old` and `news` are changectx.
+    <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
+    tuple. `old` and `news` are changectx. metadata is an optional dictionary
+    containing metadata for this marker only. It is merged with the global
+    metadata specified through the `metadata` argument of this function.
 
     Trying to obsolete a public changeset will raise an exception.
 
@@ -865,7 +846,13 @@ def createmarkers(repo, relations, flag=
         metadata['user'] = repo.ui.username()
     tr = repo.transaction('add-obsolescence-marker')
     try:
-        for prec, sucs in relations:
+        for rel in relations:
+            prec = rel[0]
+            sucs = rel[1]
+            localmetadata = metadata.copy()
+            if 2 < len(rel):
+                localmetadata.update(rel[2])
+
             if not prec.mutable():
                 raise util.Abort("cannot obsolete immutable changeset: %s"
                                  % prec)
@@ -873,7 +860,7 @@ def createmarkers(repo, relations, flag=
             nsucs = tuple(s.node() for s in sucs)
             if nprec in nsucs:
                 raise util.Abort("changeset %s cannot obsolete itself" % prec)
-            repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
+            repo.obsstore.create(tr, nprec, nsucs, flag, localmetadata)
         repo.filteredrevcache.clear()
         tr.close()
     finally:
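
_pushkeyescape above packs encoded markers into dump0, dump1, ... pushkey entries, opening a new part whenever the next marker would exceed the payload limit. A dependency-free sketch of the chunking only — base85 encoding and the exact part naming order are elided:

    _maxpayload = 5300

    def pushkeyescape(encodedmarkers):
        keys = {}
        parts = []
        currentlen = _maxpayload * 2  # ensure we create a new part
        for nextdata in encodedmarkers:
            if len(nextdata) + currentlen > _maxpayload:
                currentpart = []
                parts.append(currentpart)
                currentlen = 0
            currentlen += len(nextdata)
            currentpart.append(nextdata)
        for idx, part in enumerate(parts):
            keys['dump%i' % idx] = ''.join(part)
        return keys

    keys = pushkeyescape(['x' * 4000, 'y' * 4000, 'z' * 100])
    assert sorted(keys) == ['dump0', 'dump1']
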
@@ -75,9 +75,12 @@ class parser(object):
         if len(infix) == 3:
             self._match(infix[2], pos)
         return expr
-    def parse(self, message):
+    def parse(self, message, lookup=None):
         'generate a parse tree from a message'
-        self._iter = self._tokenizer(message)
+        if lookup:
+            self._iter = self._tokenizer(message, lookup)
+        else:
+            self._iter = self._tokenizer(message)
         self._advance()
         res = self._parse()
         token, value, pos = self.current
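
parse() now threads an optional lookup argument through to the tokenizer without disturbing tokenizers that accept a single argument. The same pattern in miniature (maketokens is a made-up stand-in for the tokenizer):

    def maketokens(message, lookup=None):
        if lookup:
            return iter(lookup(w) for w in message.split())
        return iter(message.split())

    assert list(maketokens("a b")) == ["a", "b"]
    assert list(maketokens("a b", lookup=str.upper)) == ["A", "B"]
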
@@ -14,6 +14,8 @@
 
 #include "util.h"
 
+static char *versionerrortext = "Python minor version mismatch";
+
 static int8_t hextable[256] = {
 	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
 	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
@@ -1208,7 +1210,7 @@ static PyObject *find_gca_candidates(ind
 	const bitmask allseen = (1ull << revcount) - 1;
 	const bitmask poison = 1ull << revcount;
 	PyObject *gca = PyList_New(0);
-	int i, v, interesting, left;
+	int i, v, interesting;
 	int maxrev = -1;
 	long sp;
 	bitmask *seen;
@@ -1230,7 +1232,7 @@ static PyObject *find_gca_candidates(ind
 	for (i = 0; i < revcount; i++)
 		seen[revs[i]] = 1ull << i;
 
-	interesting = left = revcount;
+	interesting = revcount;
 
 	for (v = maxrev; v >= 0 && interesting; v--) {
 		long sv = seen[v];
@@ -1251,11 +1253,8 @@ static PyObject *find_gca_candidates(ind
 			}
 			sv |= poison;
 			for (i = 0; i < revcount; i++) {
-				if (revs[i] == v) {
-					if (--left <= 1)
-						goto done;
-					break;
-				}
+				if (revs[i] == v)
+					goto done;
 			}
 		}
 	}
@@ -1529,10 +1528,6 @@ static PyObject *index_ancestors(indexOb
 		ret = gca;
 		Py_INCREF(gca);
 	}
-	else if (PyList_GET_SIZE(gca) == 1) {
-		ret = PyList_GET_ITEM(gca, 0);
-		Py_INCREF(ret);
-	}
 	else ret = find_deepest(self, gca);
 
 done:
@@ -1549,6 +1544,97 @@ bail:
 }
 
 /*
+ * Given a (possibly overlapping) set of revs, return all the
+ * common ancestors heads: heads(::args[0] and ::a[1] and ...)
+ */
+static PyObject *index_commonancestorsheads(indexObject *self, PyObject *args)
+{
+	PyObject *ret = NULL;
+	Py_ssize_t argcount, i, len;
+	bitmask repeat = 0;
+	int revcount = 0;
+	int *revs;
+
+	argcount = PySequence_Length(args);
+	revs = malloc(argcount * sizeof(*revs));
+	if (argcount > 0 && revs == NULL)
+		return PyErr_NoMemory();
+	len = index_length(self) - 1;
+
+	for (i = 0; i < argcount; i++) {
+		static const int capacity = 24;
+		PyObject *obj = PySequence_GetItem(args, i);
+		bitmask x;
+		long val;
+
+		if (!PyInt_Check(obj)) {
+			PyErr_SetString(PyExc_TypeError,
+					"arguments must all be ints");
+			goto bail;
+		}
+		val = PyInt_AsLong(obj);
+		if (val == -1) {
+			ret = PyList_New(0);
+			goto done;
+		}
+		if (val < 0 || val >= len) {
+			PyErr_SetString(PyExc_IndexError,
+					"index out of range");
+			goto bail;
+		}
+		/* this cheesy bloom filter lets us avoid some more
+		 * expensive duplicate checks in the common set-is-disjoint
+		 * case */
+		x = 1ull << (val & 0x3f);
+		if (repeat & x) {
+			int k;
+			for (k = 0; k < revcount; k++) {
+				if (val == revs[k])
+					goto duplicate;
+			}
+		}
+		else repeat |= x;
+		if (revcount >= capacity) {
+			PyErr_Format(PyExc_OverflowError,
+				     "bitset size (%d) > capacity (%d)",
+				     revcount, capacity);
+			goto bail;
+		}
+		revs[revcount++] = (int)val;
+	duplicate:;
+	}
+
+	if (revcount == 0) {
+		ret = PyList_New(0);
+		goto done;
+	}
+	if (revcount == 1) {
+		PyObject *obj;
+		ret = PyList_New(1);
+		if (ret == NULL)
+			goto bail;
+		obj = PyInt_FromLong(revs[0]);
+		if (obj == NULL)
+			goto bail;
+		PyList_SET_ITEM(ret, 0, obj);
+		goto done;
+	}
+
+	ret = find_gca_candidates(self, revs, revcount);
+	if (ret == NULL)
+		goto bail;
+
+done:
+	free(revs);
+	return ret;
+
+bail:
+	free(revs);
+	Py_XDECREF(ret);
+	return NULL;
+}
+
+/*
  * Invalidate any trie entries introduced by added revs.
  */
 static void nt_invalidate_added(indexObject *self, Py_ssize_t start)
@@ -1792,6 +1878,9 @@ static PyMappingMethods index_mapping_me
 static PyMethodDef index_methods[] = {
 	{"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
 	 "return the gca set of the given revs"},
+	{"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
+	 METH_VARARGS,
+	 "return the heads of the common ancestors of the given revs"},
 	{"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
 	 "clear the index caches"},
 	{"get", (PyCFunction)index_m_get, METH_VARARGS,
@@ -1918,6 +2007,16 @@ void dirs_module_init(PyObject *mod);
 
 static void module_init(PyObject *mod)
 {
+	/* This module constant has two purposes. First, it lets us unit test
+	 * the ImportError raised without hard-coding any error text. This
+	 * means we can change the text in the future without breaking tests,
+	 * even across changesets without a recompile. Second, its presence
+	 * can be used to determine whether the version-checking logic is
+	 * present, which also helps in testing across changesets without a
+	 * recompile. Note that this means the pure-Python version of parsers
+	 * should not have this module constant. */
+	PyModule_AddStringConstant(mod, "versionerrortext", versionerrortext);
+
 	dirs_module_init(mod);
 
 	indexType.tp_new = PyType_GenericNew;
@@ -1935,6 +2034,24 @@ static void module_init(PyObject *mod)
 	dirstate_unset = Py_BuildValue("ciii", 'n', 0, -1, -1);
 }
 
+static int check_python_version(void)
+{
+	PyObject *sys = PyImport_ImportModule("sys");
+	long hexversion = PyInt_AsLong(PyObject_GetAttrString(sys, "hexversion"));
+	/* sys.hexversion is a 32-bit number by default, so the -1 case
+	 * should only occur in unusual circumstances (e.g. if sys.hexversion
+	 * is manually set to an invalid value). */
+	if ((hexversion == -1) || (hexversion >> 16 != PY_VERSION_HEX >> 16)) {
+		PyErr_Format(PyExc_ImportError, "%s: The Mercurial extension "
+			"modules were compiled with Python " PY_VERSION ", but "
+			"Mercurial is currently using Python with sys.hexversion=%ld: "
+			"Python %s\n at: %s", versionerrortext, hexversion,
			Py_GetVersion(), Py_GetProgramFullPath());
+		return -1;
+	}
+	return 0;
+}
+
 #ifdef IS_PY3K
 static struct PyModuleDef parsers_module = {
 	PyModuleDef_HEAD_INIT,
@@ -1946,14 +2063,22 @@ static struct PyModuleDef parsers_module
 
 PyMODINIT_FUNC PyInit_parsers(void)
 {
-	PyObject *mod = PyModule_Create(&parsers_module);
+	PyObject *mod;
+
+	if (check_python_version() == -1)
+		return;
+	mod = PyModule_Create(&parsers_module);
 	module_init(mod);
 	return mod;
 }
 #else
 PyMODINIT_FUNC initparsers(void)
 {
-	PyObject *mod = Py_InitModule3("parsers", methods, parsers_doc);
+	PyObject *mod;
+
+	if (check_python_version() == -1)
+		return;
+	mod = Py_InitModule3("parsers", methods, parsers_doc);
 	module_init(mod);
 }
 #endif
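
check_python_version compares only the major/minor fields of sys.hexversion with the interpreter the module was compiled against, so micro-release upgrades do not trip the ImportError. The comparison expressed in Python, with the build-time constant as a hypothetical stand-in for PY_VERSION_HEX:

    import sys

    COMPILED_HEXVERSION = 0x02070600  # hypothetical build-time value (2.7.6)

    def versionmatches(hexversion=None):
        if hexversion is None:
            hexversion = sys.hexversion
        # keep only the major and minor fields (top 16 bits)
        return hexversion >> 16 == COMPILED_HEXVERSION >> 16

    assert versionmatches(0x02070300)      # 2.7.3 vs 2.7.6: minor matches
    assert not versionmatches(0x02060900)  # 2.6.9: minor mismatch
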
@@ -1859,7 +1859,7 @@ def diffstatdata(lines):
             # set numbers to 0 anyway when starting new file
             adds, removes, isbinary = 0, 0, False
             if line.startswith('diff --git a/'):
-                filename = gitre.search(line).group(1)
+                filename = gitre.search(line).group(2)
             elif line.startswith('diff -r'):
                 # format: "diff -r ... -r ... filename"
                 filename = diffre.search(line).group(1)
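
The diffstat fix above reads group(2) because, assuming gitre has the usual two-group shape shown below, group(1) is the source path of the git header and reports the old name after a rename:

    import re

    # assumed shape of gitre, for illustration only
    gitre = re.compile(r'diff --git a/(.*) b/(.*)')
    line = 'diff --git a/oldname b/newname'
    m = gitre.search(line)
    assert m.group(1) == 'oldname'   # source side - wrong after a rename
    assert m.group(2) == 'newname'   # destination side - the name to report
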
@@ -258,7 +258,7 @@ class phasecache(object):
             filtered = False
             nodemap = repo.changelog.nodemap # to filter unknown nodes
             for phase, nodes in enumerate(self.phaseroots):
-                missing = [node for node in nodes if node not in nodemap]
+                missing = sorted(node for node in nodes if node not in nodemap)
                 if missing:
                     for mnode in missing:
                         repo.ui.debug(
@@ -6,24 +6,24 b'' | |||||
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
9 | from mercurial import changegroup |
|
9 | from mercurial import changegroup, exchange | |
10 | from mercurial.node import short |
|
10 | from mercurial.node import short | |
11 | from mercurial.i18n import _ |
|
11 | from mercurial.i18n import _ | |
12 | import os |
|
|||
13 | import errno |
|
12 | import errno | |
14 |
|
13 | |||
15 | def _bundle(repo, bases, heads, node, suffix, compress=True): |
|
14 | def _bundle(repo, bases, heads, node, suffix, compress=True): | |
16 | """create a bundle with the specified revisions as a backup""" |
|
15 | """create a bundle with the specified revisions as a backup""" | |
17 |
cg = |
|
16 | cg = changegroup.changegroupsubset(repo, bases, heads, 'strip') | |
18 |
backupdir = |
|
17 | backupdir = "strip-backup" | |
19 | if not os.path.isdir(backupdir): |
|
18 | vfs = repo.vfs | |
20 |
|
|
19 | if not vfs.isdir(backupdir): | |
21 | name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix)) |
|
20 | vfs.mkdir(backupdir) | |
|
21 | name = "%s/%s-%s.hg" % (backupdir, short(node), suffix) | |||
22 | if compress: |
|
22 | if compress: | |
23 | bundletype = "HG10BZ" |
|
23 | bundletype = "HG10BZ" | |
24 | else: |
|
24 | else: | |
25 | bundletype = "HG10UN" |
|
25 | bundletype = "HG10UN" | |
26 | return changegroup.writebundle(cg, name, bundletype) |
|
26 | return changegroup.writebundle(cg, name, bundletype, vfs) | |
27 |
|
27 | |||
28 | def _collectfiles(repo, striprev): |
|
28 | def _collectfiles(repo, striprev): | |
29 | """find out the filelogs affected by the strip""" |
|
29 | """find out the filelogs affected by the strip""" | |
@@ -108,10 +108,13 @@ def strip(ui, repo, nodelist, backup="al
 
     # create a changegroup for all the branches we need to keep
     backupfile = None
+    vfs = repo.vfs
     if backup == "all":
         backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
-        repo.ui.status(_("saved backup bundle to %s\n") %
-        repo.ui.log("backupbundle", "saved backup bundle to %s\n", backupfile)
+        repo.ui.status(_("saved backup bundle to %s\n") %
+                       vfs.join(backupfile))
+        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
+                    vfs.join(backupfile))
     if saveheads or savebases:
         # do not compress partial bundle if we remove it from disk later
         chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
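
Because _bundle now returns a repo-relative name, the status and log messages expand it with vfs.join() so the user sees a path they can actually open. Continuing the hypothetical Vfs sketch above, with made-up values:

# Hypothetical values, reusing the toy Vfs class from the earlier sketch.
v = Vfs('/home/user/repo/.hg')
backupfile = 'strip-backup/1234abcd-backup.hg'
print("saved backup bundle to %s" % v.join(backupfile))
# -> saved backup bundle to /home/user/repo/.hg/strip-backup/1234abcd-backup.hg
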
@@ -134,6 +137,8 @@ def strip(ui, repo, nodelist, backup="al
             for i in xrange(offset, len(tr.entries)):
                 file, troffset, ignore = tr.entries[i]
                 repo.sopener(file, 'a').truncate(troffset)
+                if troffset == 0:
+                    repo.store.markremoved(file)
             tr.close()
         except: # re-raises
             tr.abort()
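
The added lines refine the transaction rollback: each entry records the size a store file had before the transaction, and truncating restores that state; a file truncated back to offset 0 held nothing beforehand, so the store is told to treat it as removed. A self-contained sketch of that logic, with hypothetical entries standing in for tr.entries:

# Hypothetical (filename, pre-transaction offset) pairs; offset 0 means
# the file was created entirely within the rolled-back transaction.
entries = [('a.i', 1024), ('new.i', 0)]
removed = []

for name, troffset in entries:
    with open(name, 'ab') as f:      # stand-in for repo.sopener(file, 'a')
        f.truncate(troffset)
    if troffset == 0:
        removed.append(name)         # stand-in for repo.store.markremoved(file)

print(removed)                       # ['new.i']
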
@@ -141,25 +146,27 @@ def strip(ui, repo, nodelist, backup="al
 
         if saveheads or savebases:
             ui.note(_("adding branch\n"))
-            f = open(chgrpfile, "rb")
-            gen = change
+            f = vfs.open(chgrpfile, "rb")
+            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
             if not repo.ui.verbose:
                 # silence internal shuffling chatter
                 repo.ui.pushbuffer()
-
+            changegroup.addchangegroup(repo, gen, 'strip',
+                                       'bundle:' + vfs.join(chgrpfile), True)
             if not repo.ui.verbose:
                 repo.ui.popbuffer()
             f.close()
             if not keeppartialbundle:
-
+                vfs.unlink(chgrpfile)
 
         # remove undo files
-        for undofile in repo.undofiles():
+        for undovfs, undofile in repo.undofiles():
             try:
-                os.unlink(undofile)
+                undovfs.unlink(undofile)
             except OSError, e:
                 if e.errno != errno.ENOENT:
-                    ui.warn(_('error removing %s: %s\n') %
+                    ui.warn(_('error removing %s: %s\n') %
+                            (undovfs.join(undofile), str(e)))
 
         for m in updatebm:
             bm[m] = repo[newbmtarget].node()
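
undofiles() now yields (vfs, name) pairs, so each undo file is unlinked through the vfs that owns it and error messages show the fully joined path; the ENOENT check keeps an already-missing undo file from counting as an error. A sketch of the same pattern in isolation (the wrapper function itself is hypothetical, mirroring the names in the diff):

import errno

def remove_undofiles(undofiles, warn):
    """Delete (vfs, name) undo files, ignoring already-missing ones."""
    for undovfs, undofile in undofiles:
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                warn('error removing %s: %s\n'
                     % (undovfs.join(undofile), str(e)))

# e.g. remove_undofiles(repo.undofiles(), ui.warn) in the real code path
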
@@ -167,10 +174,10 @@ def strip(ui, repo, nodelist, backup="al
     except: # re-raises
         if backupfile:
             ui.warn(_("strip failed, full bundle stored in '%s'\n")
-                    % backupfile)
+                    % vfs.join(backupfile))
         elif saveheads:
             ui.warn(_("strip failed, partial bundle stored in '%s'\n")
-                    % chgrpfile)
+                    % vfs.join(chgrpfile))
         raise
 
     repo.destroyed()