@@ -1,3110 +1,3117 @@
# debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import codecs
import collections
import difflib
import errno
import operator
import os
import random
import re
import socket
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import time

from .i18n import _
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
)
from .thirdparty import (
    cbor,
)
from . import (
    bundle2,
    changegroup,
    cmdutil,
    color,
    context,
    dagparser,
    dagutil,
    encoding,
    error,
    exchange,
    extensions,
    filemerge,
    fileset,
    formatter,
    hg,
    httppeer,
    localrepo,
    lock as lockmod,
    logcmdutil,
    merge as mergemod,
    obsolete,
    obsutil,
    phases,
    policy,
    pvec,
    pycompat,
    registrar,
    repair,
    revlog,
    revset,
    revsetlang,
    scmutil,
    setdiscovery,
    simplemerge,
    smartset,
    sshpeer,
    sslutil,
    streamclone,
    templater,
    treediscovery,
    upgrade,
    url as urlmod,
    util,
    vfs as vfsmod,
    wireprotoframing,
    wireprotoserver,
)
from .utils import (
    dateutil,
    procutil,
    stringutil,
)

release = lockmod.release

command = registrar.command()

@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (r.rev(a), hex(a)))

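# Illustrative usage (not part of the original file): inside a repository,
# "hg debugancestor REV1 REV2" prints the greatest common ancestor of the
# two revisions as "<rev>:<hex>", per the write() format above.
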
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)

@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string-valued elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """
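    # A sketch of the element language above (illustrative input, not part
    # of the original file):
    #   hg debugbuilddag '+2:f +3:p2 @temp <f +4 @default /p2 +2'
    # builds two linear runs tagged "f" and "p2", forks a "temp" branch at
    # "f", then merges it back on "default" at the node tagged "p2".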

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []

    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("builddag")

        at = -1
        atbranch = 'default'
        nodeids = []
        id = 0
        ui.progress(_('building'), id, unit=_('revisions'), total=total)
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                ui.note(('branch %s\n' % data))
                atbranch = data
            ui.progress(_('building'), id, unit=_('revisions'), total=total)
        tr.close()

        if tags:
            repo.vfs.write("localtags", "".join(tags))
    finally:
        ui.progress(_('building'), None)
        release(tr, lock, wlock)

def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            ui.write("\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write("%s%s %s %s %s %s %d\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write("%s%s\n" % (indent_string, hex(node)))

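# With --all, each row printed under a "changelog"/"manifest"/<file> heading
# is "<node> <p1> <p2> <cset> <deltabase> <len(delta)>" -- five full hashes
# followed by the delta length, matching the format line written above.
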
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        msg = "%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = "%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()

def _debugphaseheads(ui, data, indent=0):
    """display phase heads contained in 'data'"""
    indent_string = ' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write('%s %s\n' % (hex(head), phases.phasenames[phase]))

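# Each line pairs a head hash with its phase name; for this version the
# phase names are public, draft and secret (hashes are repository-specific).
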
def _quasirepr(thing):
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        return '{%s}' % (
            b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing)))
    return pycompat.bytestr(repr(thing))

def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    parttypes = opts.get(r'part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        ui.write('%s -- %s\n' % (part.type, _quasirepr(part.params)))
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)

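# Typical output shape (values depend on the bundle; these are illustrative):
#   Stream params: {Compression: BZ}
#   changegroup -- {version: 02}
# with one "<parttype> -- {<params>}" line per part.
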
@command('debugbundle',
        [('a', 'all', None, _('show all details')),
         ('', 'part-type', [], _('show only the named part type')),
         ('', 'spec', None, _('print the bundlespec of the bundle'))],
        _('FILE'),
        norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)

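# Illustrative: "hg debugbundle --spec FILE" prints only the bundlespec,
# e.g. "bzip2-v2" for a standard bundle2 file.
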
@command('debugcapabilities',
        [], _('PATH'),
        norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    caps = peer.capabilities()
    ui.write(('Main capabilities:\n'))
    for c in sorted(caps):
        ui.write(('  %s\n') % c)
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.write(('Bundle2 capabilities:\n'))
        for key, values in sorted(b2caps.iteritems()):
            ui.write(('  %s\n') % key)
            for v in values:
                ui.write(('    %s\n') % v)

@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
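    # dirstate entry states checked below: 'n' = normal, 'a' = added,
    # 'r' = removed, 'm' = merged (from a two-parent checkout)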
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # use a local name that does not shadow the 'error' module above
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)

@command('debugcolor',
        [('', 'style', None, _('show all configured styles'))],
        'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available colors, effects or styles"""
    ui.write(('color mode: %s\n') % ui._colormode)
    if opts.get(r'style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)

def _debugdisplaycolor(ui):
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort labels with '_' after the others to group the '_background' entries
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)

def _debugdisplaystyle(ui):
    ui.write(_('available style:\n'))
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(label))))
            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
        ui.write('\n')

@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))

@command('debugdag',
        [('t', 'tags', None, _('use tags as labels')),
        ('b', 'branches', None, _('annotate with branch names')),
        ('', 'dots', None, _('use dots for runs')),
        ('s', 'spaces', None, _('separate elements by spaces'))],
        _('[OPTION]... [FILE [REV]...]'),
        optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
        ui.write("\n")

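# Note: the text emitted above is the same DAG language that debugbuilddag
# consumes, so "hg debugdag -t" output can in principle be replayed through
# "hg debugbuilddag" in an empty repository.
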
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(r.revision(r.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)

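# Illustrative: "hg debugdata -c 0" dumps the raw changelog entry for rev 0;
# "hg debugdata FILE REV" does the same for a filelog revision of FILE.
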
@command('debugdate',
        [('e', 'extended', None, _('try extended date formats'))],
        _('[-e] DATE [RANGE]'),
        norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts[r"extended"]:
        d = dateutil.parsedate(date, util.extendeddateformats)
    else:
        d = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % d)
    ui.write(("standard: %s\n") % dateutil.datestr(d))
    if range:
        m = dateutil.matchdate(range)
        ui.write(("match: %s\n") % m(d[0]))

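# Illustrative session (assuming a UTC environment):
#   $ hg debugdate '2006-02-01 13:00:30'
#   internal: 1138798830 0
#   standard: Wed Feb 01 13:00:30 2006 +0000
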
@command('debugdeltachain',
        cmdutil.debugrevlogopts + cmdutil.formatteropts,
        _('-c|-m|FILE'),
        optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use sparse reads, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    Sparse reads can be enabled with ``experimental.sparse-read = True``.
    """
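    # Illustrative invocation (not part of the original file):
    #   hg debugdeltachain -m -T '{rev} {chainid} {chainlen}\n'
    # The sparse-read keywords additionally require an hgrc section like:
    #   [experimental]
    #   sparse-read = True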
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    fm.plain('    rev  chain# chainlen     prev   delta       '
             'size    rawsize  chainsize     ratio   lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain('   readsize largestblk rddensity srchunks')
    fm.plain('\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        start = r.start
        length = r.length
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        chainratio = float(chainsize) / float(uncomp)
        extraratio = float(extradist) / float(chainsize)

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in revlog._slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            readdensity = float(chainsize) / float(readsize)

            fm.write('readsize largestblock readdensity srchunks',
                     ' %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)

        fm.plain('\n')

    fm.end()

@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = opts.get(r'nodates')
    datesort = opts.get(r'datesort')

    timestr = ""
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            timestr = 'unset               '
        elif nodates:
            timestr = 'set                 '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))

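# Each line written above is "<state> <mode> <size> <mtime> <file>", e.g.
# (hypothetical entry): "n 644         12 2018-01-01 10:00:00 foo".
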
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ('', 'rev', [], 'restrict discovery to this set of revs'),
    ] + cmdutil.remoteopts,
    _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(pushedrevs, remoteheads, remote=remote):
        if opts.get('old'):
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
                                                            ancestorsof=nodes)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
|
806 | elif rheads <= common: | |
807 | ui.write(("remote is subset\n")) |
|
807 | ui.write(("remote is subset\n")) | |
808 |
|
808 | |||
809 | remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None) |
|
809 | remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None) | |
810 | localrevs = opts['rev'] |
|
810 | localrevs = opts['rev'] | |
811 | doit(localrevs, remoterevs) |
|
811 | doit(localrevs, remoterevs) | |
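
# Illustrative usage sketch (hypothetical remote and output, not from
# upstream): run discovery against the default path, seeding it with the
# ancestors of the current revision:
#
#   $ hg debugdiscovery --rev . default
#   comparing with ssh://example.com/repo
#   common heads: 66f7d451a68b
#   local is subset
#
# --old forces the legacy tree-walking protocol from treediscovery instead
# of the sampling-based setdiscovery.findcommonheads().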

_chunksize = 4 << 10

@command('debugdownload',
    [
        ('o', 'output', '', _('path')),
    ],
    optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config"""
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, "wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        if output:
            dest.close()
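
# Illustrative usage sketch (hypothetical URL): fetch a resource with
# Mercurial's configured auth/proxy handling and stream it to a file in
# _chunksize (4 KiB) reads:
#
#   $ hg debugdownload -o bundle.hg https://example.com/some-bundle.hg
#
# Without -o, the chunks are written to the ui object (standard output).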

@command('debugextensions', cmdutil.formatteropts, [], norepo=True)
def debugextensions(ui, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = [] # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                     _('  location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _('  tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _('  bug reporting: %s\n'), extbuglink or "")

    fm.end()
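
# Illustrative usage sketch: cmdutil.formatteropts makes the output
# template-able, so a machine-readable dump (hypothetical extension list) is:
#
#   $ hg debugextensions -T json
#   [{"bundled": true, "name": "rebase", "testedwith": []}, ...]
#
# --verbose adds the location, bundled flag, tested-with list and bug link.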

@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
    _('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    ctx = scmutil.revsingle(repo, opts.get(r'rev'), None)
    if ui.verbose:
        tree = fileset.parse(expr)
        ui.note(fileset.prettyformat(tree), "\n")

    for f in ctx.getfileset(expr):
        ui.write("%s\n" % f)
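
# Illustrative usage sketch (hypothetical file names): apply a fileset to a
# revision and print each matching file on its own line:
#
#   $ hg debugfileset -r . 'modified() and grep("TODO")'
#   src/engine.py
#
# With --verbose the parsed tree is dumped first via fileset.prettyformat().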

@command('debugformat',
         [] + cmdutil.formatteropts,
         _(''))
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        formatvalue = pycompat.identity

    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()
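
# Illustrative output sketch (values vary per repository): one row per entry
# in upgrade.allformatvariant; --verbose appends config/default columns:
#
#   $ hg debugformat
#   format-variant    repo
#   fncache:           yes
#   dotencode:         yes
#   generaldelta:      yes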

@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    ui.write(('path: %s\n') % path)
    ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)'))
    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
    casesensitive = '(unknown)'
    try:
        with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
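
# Illustrative output sketch (filesystem-dependent values):
#
#   $ hg debugfsinfo .
#   path: .
#   mounted on: /
#   exec: yes
#   fstype: ext4
#   symlink: yes
#   hardlink: yes
#   case-sensitive: yes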

@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
     ('C', 'common', [], _('id of common node'), _('ID')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    args = {}
    if common:
        args[r'common'] = [bin(s) for s in common]
    if head:
        args[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args[r'bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
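
# Illustrative usage sketch (placeholder node id): fetch everything the
# remote has on top of a known common node into a bundle2 file:
#
#   $ hg debuggetbundle http://example.com/repo out.hg \
#         -C 1111111111111111111111111111111111111111 -t bundle2
#
# The -t values 'none', 'bzip2', 'gzip' and 'bundle2' map onto the
# HG10UN/HG10BZ/HG10GZ/HG20 bundle types above.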

@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % m.uipath(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (m.uipath(f), ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % m.uipath(f))
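
# Illustrative usage sketch (hypothetical rule): querying a specific path
# reports the rule that made it ignored:
#
#   $ hg debugignore build/output.o
#   build/output.o is ignored
#   (ignore rule in .hgignore, line 3: 'build')
#
# With no arguments, the repr of the combined ignore matcher is printed.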

@command('debugindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        if ui.verbose:
            ui.write(("   rev    offset  length linkrev"
                      " %s %s p2\n") % ("nodeid".ljust(idlen),
                                        "p1".ljust(idlen)))
        else:
            ui.write(("   rev linkrev %s %s p2\n") % (
                "nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        if ui.verbose:
            ui.write(("   rev flag   offset   length     size   link     p1"
                      "     p2 %s\n") % "nodeid".rjust(idlen))
        else:
            ui.write(("   rev flag     size   link     p1     p2 %s\n") %
                     "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
            else:
                ui.write("% 6d % 7d %s %s %s\n" % (
                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
                    shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    r.linkrev(i), pr[0], pr[1], shortfn(node)))
            else:
                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
                    shortfn(node)))
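
# Illustrative output sketch (placeholder hashes): format 0 on the changelog
# (-c from cmdutil.debugrevlogopts) prints rev/linkrev plus node and parents;
# format 1 (-f 1) adds flags, offsets and sizes:
#
#   $ hg debugindex -c
#      rev linkrev nodeid       p1           p2
#        0       0 1111111111aa 000000000000 000000000000
#        1       1 2222222222bb 1111111111aa 000000000000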

@command('debugindexdot', cmdutil.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write("}\n")
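
# Illustrative usage sketch: the dot output can be rendered directly,
# assuming graphviz is installed:
#
#   $ hg debugindexdot -c | dot -Tpng -o dag.png
#
# Each revision gets an edge from its first parent, and from its second
# parent too unless that parent is nullid.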

@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    def writetemp(contents):
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, r"wb")
        f.write(contents)
        f.close()
        return name

    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_('  TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_('  SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    if policy.policy in ('c', 'allow'):
        err = None
        try:
            from .cext import (
                base85,
                bdiff,
                mpatch,
                osutil,
            )
            dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
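
# Illustrative output sketch (installation-dependent): one line per check,
# with the problem count doubling as the exit status:
#
#   $ hg debuginstall
#   checking encoding (UTF-8)...
#   checking Python executable (/usr/bin/python)
#   ...
#   checking username (Jane Doe <jane@example.com>)
#   no problems detected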

@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('known'):
        raise error.Abort("known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
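
# Illustrative usage sketch (placeholder ids): the answer is one '0'/'1' flag
# per queried node, concatenated in argument order:
#
#   $ hg debugknown . 1111111111111111111111111111111111111111 \
#         2222222222222222222222222222222222222222
#   01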

@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)

@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    locks = []
    try:
        if opts.get(r'set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user, pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user, pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
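
# Illustrative session sketch (hypothetical lock holder): the report mode
# mirrors the "%-6s %s (%ds)" format used above:
#
#   $ hg debuglocks
#   lock:  free
#   wlock: user alice, process 4231 (12s)
#
#   $ hg debuglocks --set-wlock
#   ready to release the lock (y)?
#
# The exit status is the number of locks currently held.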
1422 |
|
1422 | |||
1423 | @command('debugmergestate', [], '') |
|
1423 | @command('debugmergestate', [], '') | |
1424 | def debugmergestate(ui, repo, *args): |
|
1424 | def debugmergestate(ui, repo, *args): | |
1425 | """print merge state |
|
1425 | """print merge state | |
1426 |
|
1426 | |||
1427 | Use --verbose to print out information about whether v1 or v2 merge state |
|
1427 | Use --verbose to print out information about whether v1 or v2 merge state | |
1428 | was chosen.""" |
|
1428 | was chosen.""" | |
1429 | def _hashornull(h): |
|
1429 | def _hashornull(h): | |
1430 | if h == nullhex: |
|
1430 | if h == nullhex: | |
1431 | return 'null' |
|
1431 | return 'null' | |
1432 | else: |
|
1432 | else: | |
1433 | return h |
|
1433 | return h | |
1434 |
|
1434 | |||
1435 | def printrecords(version): |
|
1435 | def printrecords(version): | |
1436 | ui.write(('* version %d records\n') % version) |
|
1436 | ui.write(('* version %d records\n') % version) | |
1437 | if version == 1: |
|
1437 | if version == 1: | |
1438 | records = v1records |
|
1438 | records = v1records | |
1439 | else: |
|
1439 | else: | |
1440 | records = v2records |
|
1440 | records = v2records | |
1441 |
|
1441 | |||
1442 | for rtype, record in records: |
|
1442 | for rtype, record in records: | |
1443 | # pretty print some record types |
|
1443 | # pretty print some record types | |
1444 | if rtype == 'L': |
|
1444 | if rtype == 'L': | |
1445 | ui.write(('local: %s\n') % record) |
|
1445 | ui.write(('local: %s\n') % record) | |
1446 | elif rtype == 'O': |
|
1446 | elif rtype == 'O': | |
1447 | ui.write(('other: %s\n') % record) |
|
1447 | ui.write(('other: %s\n') % record) | |
1448 | elif rtype == 'm': |
|
1448 | elif rtype == 'm': | |
1449 | driver, mdstate = record.split('\0', 1) |
|
1449 | driver, mdstate = record.split('\0', 1) | |
1450 | ui.write(('merge driver: %s (state "%s")\n') |
|
1450 | ui.write(('merge driver: %s (state "%s")\n') | |
1451 | % (driver, mdstate)) |
|
1451 | % (driver, mdstate)) | |
1452 | elif rtype in 'FDC': |
|
1452 | elif rtype in 'FDC': | |
1453 | r = record.split('\0') |
|
1453 | r = record.split('\0') | |
1454 | f, state, hash, lfile, afile, anode, ofile = r[0:7] |
|
1454 | f, state, hash, lfile, afile, anode, ofile = r[0:7] | |
1455 | if version == 1: |
|
1455 | if version == 1: | |
1456 | onode = 'not stored in v1 format' |
|
1456 | onode = 'not stored in v1 format' | |
1457 | flags = r[7] |
|
1457 | flags = r[7] | |
1458 | else: |
|
1458 | else: | |
1459 | onode, flags = r[7:9] |
|
1459 | onode, flags = r[7:9] | |
1460 | ui.write(('file: %s (record type "%s", state "%s", hash %s)\n') |
|
1460 | ui.write(('file: %s (record type "%s", state "%s", hash %s)\n') | |
1461 | % (f, rtype, state, _hashornull(hash))) |
|
1461 | % (f, rtype, state, _hashornull(hash))) | |
1462 | ui.write((' local path: %s (flags "%s")\n') % (lfile, flags)) |
|
1462 | ui.write((' local path: %s (flags "%s")\n') % (lfile, flags)) | |
1463 | ui.write((' ancestor path: %s (node %s)\n') |
|
1463 | ui.write((' ancestor path: %s (node %s)\n') | |
1464 | % (afile, _hashornull(anode))) |
|
1464 | % (afile, _hashornull(anode))) | |
1465 | ui.write((' other path: %s (node %s)\n') |
|
1465 | ui.write((' other path: %s (node %s)\n') | |
1466 | % (ofile, _hashornull(onode))) |
|
1466 | % (ofile, _hashornull(onode))) | |
1467 | elif rtype == 'f': |
|
1467 | elif rtype == 'f': | |
1468 | filename, rawextras = record.split('\0', 1) |
|
1468 | filename, rawextras = record.split('\0', 1) | |
1469 | extras = rawextras.split('\0') |
|
1469 | extras = rawextras.split('\0') | |
1470 | i = 0 |
|
1470 | i = 0 | |
1471 | extrastrings = [] |
|
1471 | extrastrings = [] | |
1472 | while i < len(extras): |
|
1472 | while i < len(extras): | |
1473 | extrastrings.append('%s = %s' % (extras[i], extras[i + 1])) |
|
1473 | extrastrings.append('%s = %s' % (extras[i], extras[i + 1])) | |
1474 | i += 2 |
|
1474 | i += 2 | |
1475 |
|
1475 | |||
1476 | ui.write(('file extras: %s (%s)\n') |
|
1476 | ui.write(('file extras: %s (%s)\n') | |
1477 | % (filename, ', '.join(extrastrings))) |
|
1477 | % (filename, ', '.join(extrastrings))) | |
1478 | elif rtype == 'l': |
|
1478 | elif rtype == 'l': | |
1479 | labels = record.split('\0', 2) |
|
1479 | labels = record.split('\0', 2) | |
1480 | labels = [l for l in labels if len(l) > 0] |
|
1480 | labels = [l for l in labels if len(l) > 0] | |
1481 | ui.write(('labels:\n')) |
|
1481 | ui.write(('labels:\n')) | |
1482 | ui.write((' local: %s\n' % labels[0])) |
|
1482 | ui.write((' local: %s\n' % labels[0])) | |
1483 | ui.write((' other: %s\n' % labels[1])) |
|
1483 | ui.write((' other: %s\n' % labels[1])) | |
1484 | if len(labels) > 2: |
|
1484 | if len(labels) > 2: | |
1485 | ui.write((' base: %s\n' % labels[2])) |
|
1485 | ui.write((' base: %s\n' % labels[2])) | |
1486 | else: |
|
1486 | else: | |
1487 | ui.write(('unrecognized entry: %s\t%s\n') |
|
1487 | ui.write(('unrecognized entry: %s\t%s\n') | |
1488 | % (rtype, record.replace('\0', '\t'))) |
|
1488 | % (rtype, record.replace('\0', '\t'))) | |
1489 |
|
1489 | |||
1490 | # Avoid mergestate.read() since it may raise an exception for unsupported |
|
1490 | # Avoid mergestate.read() since it may raise an exception for unsupported | |
1491 | # merge state records. We shouldn't be doing this, but this is OK since this |
|
1491 | # merge state records. We shouldn't be doing this, but this is OK since this | |
1492 | # command is pretty low-level. |
|
1492 | # command is pretty low-level. | |
1493 | ms = mergemod.mergestate(repo) |
|
1493 | ms = mergemod.mergestate(repo) | |
1494 |
|
1494 | |||
1495 | # sort so that reasonable information is on top |
|
1495 | # sort so that reasonable information is on top | |
1496 | v1records = ms._readrecordsv1() |
|
1496 | v1records = ms._readrecordsv1() | |
1497 | v2records = ms._readrecordsv2() |
|
1497 | v2records = ms._readrecordsv2() | |
1498 | order = 'LOml' |
|
1498 | order = 'LOml' | |
1499 | def key(r): |
|
1499 | def key(r): | |
1500 | idx = order.find(r[0]) |
|
1500 | idx = order.find(r[0]) | |
1501 | if idx == -1: |
|
1501 | if idx == -1: | |
1502 | return (1, r[1]) |
|
1502 | return (1, r[1]) | |
1503 | else: |
|
1503 | else: | |
1504 | return (0, idx) |
|
1504 | return (0, idx) | |
1505 | v1records.sort(key=key) |
|
1505 | v1records.sort(key=key) | |
1506 | v2records.sort(key=key) |
|
1506 | v2records.sort(key=key) | |
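
The 'LOml' key above puts the high-level records first; every other record type falls into a second bucket ordered by its payload. A minimal standalone sketch of that ordering, using toy (rtype, data) tuples rather than Mercurial's real records:

    order = 'LOml'

    def key(r):
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])   # unknown type: second bucket, sorted by payload
        return (0, idx)        # known type: first bucket, fixed order

    records = [('F', 'b.txt'), ('l', 'labels'), ('O', 'node2'),
               ('X', 'a'), ('L', 'node1'), ('m', 'driver')]
    print([r[0] for r in sorted(records, key=key)])
    # ['L', 'O', 'm', 'l', 'X', 'F']  -- X before F because 'a' < 'b.txt'
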
1507 |
|
1507 | |||
1508 | if not v1records and not v2records: |
|
1508 | if not v1records and not v2records: | |
1509 | ui.write(('no merge state found\n')) |
|
1509 | ui.write(('no merge state found\n')) | |
1510 | elif not v2records: |
|
1510 | elif not v2records: | |
1511 | ui.note(('no version 2 merge state\n')) |
|
1511 | ui.note(('no version 2 merge state\n')) | |
1512 | printrecords(1) |
|
1512 | printrecords(1) | |
1513 | elif ms._v1v2match(v1records, v2records): |
|
1513 | elif ms._v1v2match(v1records, v2records): | |
1514 | ui.note(('v1 and v2 states match: using v2\n')) |
|
1514 | ui.note(('v1 and v2 states match: using v2\n')) | |
1515 | printrecords(2) |
|
1515 | printrecords(2) | |
1516 | else: |
|
1516 | else: | |
1517 | ui.note(('v1 and v2 states mismatch: using v1\n')) |
|
1517 | ui.note(('v1 and v2 states mismatch: using v1\n')) | |
1518 | printrecords(1) |
|
1518 | printrecords(1) | |
1519 | if ui.verbose: |
|
1519 | if ui.verbose: | |
1520 | printrecords(2) |
|
1520 | printrecords(2) | |
1521 |
|
1521 | |||
1522 | @command('debugnamecomplete', [], _('NAME...')) |
|
1522 | @command('debugnamecomplete', [], _('NAME...')) | |
1523 | def debugnamecomplete(ui, repo, *args): |
|
1523 | def debugnamecomplete(ui, repo, *args): | |
1524 | '''complete "names" - tags, open branch names, bookmark names''' |
|
1524 | '''complete "names" - tags, open branch names, bookmark names''' | |
1525 |
|
1525 | |||
1526 | names = set() |
|
1526 | names = set() | |
1527 | # since we previously only listed open branches, we will handle that |
|
1527 | # since we previously only listed open branches, we will handle that | |
1528 | # specially (after this for loop) |
|
1528 | # specially (after this for loop) | |
1529 | for name, ns in repo.names.iteritems(): |
|
1529 | for name, ns in repo.names.iteritems(): | |
1530 | if name != 'branches': |
|
1530 | if name != 'branches': | |
1531 | names.update(ns.listnames(repo)) |
|
1531 | names.update(ns.listnames(repo)) | |
1532 | names.update(tag for (tag, heads, tip, closed) |
|
1532 | names.update(tag for (tag, heads, tip, closed) | |
1533 | in repo.branchmap().iterbranches() if not closed) |
|
1533 | in repo.branchmap().iterbranches() if not closed) | |
1534 | completions = set() |
|
1534 | completions = set() | |
1535 | if not args: |
|
1535 | if not args: | |
1536 | args = [''] |
|
1536 | args = [''] | |
1537 | for a in args: |
|
1537 | for a in args: | |
1538 | completions.update(n for n in names if n.startswith(a)) |
|
1538 | completions.update(n for n in names if n.startswith(a)) | |
1539 | ui.write('\n'.join(sorted(completions))) |
|
1539 | ui.write('\n'.join(sorted(completions))) | |
1540 | ui.write('\n') |
|
1540 | ui.write('\n') | |
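
A standalone sketch of the prefix matching above, with a toy name set standing in for the repository's tags, branches, and bookmarks:

    names = {'default', 'develop', 'tip', 'v1.0', 'v1.1'}
    args = ['de', 'v1']

    completions = set()
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    print('\n'.join(sorted(completions)))
    # default
    # develop
    # v1.0
    # v1.1
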
1541 |
|
1541 | |||
1542 | @command('debugobsolete', |
|
1542 | @command('debugobsolete', | |
1543 | [('', 'flags', 0, _('marker flags')), |

1543 | [('', 'flags', 0, _('marker flags')), | |
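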
1544 | ('', 'record-parents', False, |
|
1544 | ('', 'record-parents', False, | |
1545 | _('record parent information for the precursor')), |
|
1545 | _('record parent information for the precursor')), | |
1546 | ('r', 'rev', [], _('display markers relevant to REV')), |
|
1546 | ('r', 'rev', [], _('display markers relevant to REV')), | |
1547 | ('', 'exclusive', False, _('restrict display to markers only ' |
|
1547 | ('', 'exclusive', False, _('restrict display to markers only ' | |
1548 | 'relevant to REV')), |
|
1548 | 'relevant to REV')), | |
1549 | ('', 'index', False, _('display index of the marker')), |
|
1549 | ('', 'index', False, _('display index of the marker')), | |
1550 | ('', 'delete', [], _('delete markers specified by indices')), |
|
1550 | ('', 'delete', [], _('delete markers specified by indices')), | |
1551 | ] + cmdutil.commitopts2 + cmdutil.formatteropts, |
|
1551 | ] + cmdutil.commitopts2 + cmdutil.formatteropts, | |
1552 | _('[OBSOLETED [REPLACEMENT ...]]')) |
|
1552 | _('[OBSOLETED [REPLACEMENT ...]]')) | |
1553 | def debugobsolete(ui, repo, precursor=None, *successors, **opts): |
|
1553 | def debugobsolete(ui, repo, precursor=None, *successors, **opts): | |
1554 | """create arbitrary obsolete marker |
|
1554 | """create arbitrary obsolete marker | |
1555 |
|
1555 | |||
1556 | With no arguments, displays the list of obsolescence markers.""" |
|
1556 | With no arguments, displays the list of obsolescence markers.""" | |
1557 |
|
1557 | |||
1558 | opts = pycompat.byteskwargs(opts) |
|
1558 | opts = pycompat.byteskwargs(opts) | |
1559 |
|
1559 | |||
1560 | def parsenodeid(s): |
|
1560 | def parsenodeid(s): | |
1561 | try: |
|
1561 | try: | |
1562 | # We do not use revsingle/revrange functions here to accept |
|
1562 | # We do not use revsingle/revrange functions here to accept | |
1563 | # arbitrary node identifiers, possibly not present in the |
|
1563 | # arbitrary node identifiers, possibly not present in the | |
1564 | # local repository. |
|
1564 | # local repository. | |
1565 | n = bin(s) |
|
1565 | n = bin(s) | |
1566 | if len(n) != len(nullid): |
|
1566 | if len(n) != len(nullid): | |
1567 | raise TypeError() |
|
1567 | raise TypeError() | |
1568 | return n |
|
1568 | return n | |
1569 | except TypeError: |
|
1569 | except TypeError: | |
1570 | raise error.Abort('changeset references must be full hexadecimal ' |
|
1570 | raise error.Abort('changeset references must be full hexadecimal ' | |
1571 | 'node identifiers') |
|
1571 | 'node identifiers') | |
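
A standalone sketch of the length check above: a full node identifier is 40 hex digits, which must decode to the 20 bytes of nullid (binascii stands in for mercurial.node.bin here):

    import binascii

    nullid = b'\0' * 20  # same length as mercurial.node.nullid

    def parsenodeid(s):
        n = binascii.unhexlify(s)  # raises on non-hex or odd-length input
        if len(n) != len(nullid):
            raise ValueError('changeset references must be full '
                             'hexadecimal node identifiers')
        return n

    print(len(parsenodeid(b'11' * 20)))  # 20: accepted
    # parsenodeid(b'abcd') raises: too short to be a full node id
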
1572 |
|
1572 | |||
1573 | if opts.get('delete'): |
|
1573 | if opts.get('delete'): | |
1574 | indices = [] |
|
1574 | indices = [] | |
1575 | for v in opts.get('delete'): |
|
1575 | for v in opts.get('delete'): | |
1576 | try: |
|
1576 | try: | |
1577 | indices.append(int(v)) |
|
1577 | indices.append(int(v)) | |
1578 | except ValueError: |
|
1578 | except ValueError: | |
1579 | raise error.Abort(_('invalid index value: %r') % v, |
|
1579 | raise error.Abort(_('invalid index value: %r') % v, | |
1580 | hint=_('use integers for indices')) |
|
1580 | hint=_('use integers for indices')) | |
1581 |
|
1581 | |||
1582 | if repo.currenttransaction(): |
|
1582 | if repo.currenttransaction(): | |
1583 | raise error.Abort(_('cannot delete obsmarkers in the middle ' |
|
1583 | raise error.Abort(_('cannot delete obsmarkers in the middle ' | |
1584 | 'of a transaction.')) |

1584 | 'of a transaction.')) | |
1585 |
|
1585 | |||
1586 | with repo.lock(): |
|
1586 | with repo.lock(): | |
1587 | n = repair.deleteobsmarkers(repo.obsstore, indices) |
|
1587 | n = repair.deleteobsmarkers(repo.obsstore, indices) | |
1588 | ui.write(_('deleted %i obsolescence markers\n') % n) |
|
1588 | ui.write(_('deleted %i obsolescence markers\n') % n) | |
1589 |
|
1589 | |||
1590 | return |
|
1590 | return | |
1591 |
|
1591 | |||
1592 | if precursor is not None: |
|
1592 | if precursor is not None: | |
1593 | if opts['rev']: |
|
1593 | if opts['rev']: | |
1594 | raise error.Abort('cannot select revision when creating marker') |
|
1594 | raise error.Abort('cannot select revision when creating marker') | |
1595 | metadata = {} |
|
1595 | metadata = {} | |
1596 | metadata['user'] = opts['user'] or ui.username() |
|
1596 | metadata['user'] = opts['user'] or ui.username() | |
1597 | succs = tuple(parsenodeid(succ) for succ in successors) |
|
1597 | succs = tuple(parsenodeid(succ) for succ in successors) | |
1598 | l = repo.lock() |
|
1598 | l = repo.lock() | |
1599 | try: |
|
1599 | try: | |
1600 | tr = repo.transaction('debugobsolete') |
|
1600 | tr = repo.transaction('debugobsolete') | |
1601 | try: |
|
1601 | try: | |
1602 | date = opts.get('date') |
|
1602 | date = opts.get('date') | |
1603 | if date: |
|
1603 | if date: | |
1604 | date = dateutil.parsedate(date) |
|
1604 | date = dateutil.parsedate(date) | |
1605 | else: |
|
1605 | else: | |
1606 | date = None |
|
1606 | date = None | |
1607 | prec = parsenodeid(precursor) |
|
1607 | prec = parsenodeid(precursor) | |
1608 | parents = None |
|
1608 | parents = None | |
1609 | if opts['record_parents']: |
|
1609 | if opts['record_parents']: | |
1610 | if prec not in repo.unfiltered(): |
|
1610 | if prec not in repo.unfiltered(): | |
1611 | raise error.Abort('cannot use --record-parents on ' |

1611 | raise error.Abort('cannot use --record-parents on ' | |
1612 | 'unknown changesets') |
|
1612 | 'unknown changesets') | |
1613 | parents = repo.unfiltered()[prec].parents() |
|
1613 | parents = repo.unfiltered()[prec].parents() | |
1614 | parents = tuple(p.node() for p in parents) |
|
1614 | parents = tuple(p.node() for p in parents) | |
1615 | repo.obsstore.create(tr, prec, succs, opts['flags'], |
|
1615 | repo.obsstore.create(tr, prec, succs, opts['flags'], | |
1616 | parents=parents, date=date, |
|
1616 | parents=parents, date=date, | |
1617 | metadata=metadata, ui=ui) |
|
1617 | metadata=metadata, ui=ui) | |
1618 | tr.close() |
|
1618 | tr.close() | |
1619 | except ValueError as exc: |
|
1619 | except ValueError as exc: | |
1620 | raise error.Abort(_('bad obsmarker input: %s') % |
|
1620 | raise error.Abort(_('bad obsmarker input: %s') % | |
1621 | pycompat.bytestr(exc)) |
|
1621 | pycompat.bytestr(exc)) | |
1622 | finally: |
|
1622 | finally: | |
1623 | tr.release() |
|
1623 | tr.release() | |
1624 | finally: |
|
1624 | finally: | |
1625 | l.release() |
|
1625 | l.release() | |
1626 | else: |
|
1626 | else: | |
1627 | if opts['rev']: |
|
1627 | if opts['rev']: | |
1628 | revs = scmutil.revrange(repo, opts['rev']) |
|
1628 | revs = scmutil.revrange(repo, opts['rev']) | |
1629 | nodes = [repo[r].node() for r in revs] |
|
1629 | nodes = [repo[r].node() for r in revs] | |
1630 | markers = list(obsutil.getmarkers(repo, nodes=nodes, |
|
1630 | markers = list(obsutil.getmarkers(repo, nodes=nodes, | |
1631 | exclusive=opts['exclusive'])) |
|
1631 | exclusive=opts['exclusive'])) | |
1632 | markers.sort(key=lambda x: x._data) |
|
1632 | markers.sort(key=lambda x: x._data) | |
1633 | else: |
|
1633 | else: | |
1634 | markers = obsutil.getmarkers(repo) |
|
1634 | markers = obsutil.getmarkers(repo) | |
1635 |
|
1635 | |||
1636 | markerstoiter = markers |
|
1636 | markerstoiter = markers | |
1637 | isrelevant = lambda m: True |
|
1637 | isrelevant = lambda m: True | |
1638 | if opts.get('rev') and opts.get('index'): |
|
1638 | if opts.get('rev') and opts.get('index'): | |
1639 | markerstoiter = obsutil.getmarkers(repo) |
|
1639 | markerstoiter = obsutil.getmarkers(repo) | |
1640 | markerset = set(markers) |
|
1640 | markerset = set(markers) | |
1641 | isrelevant = lambda m: m in markerset |
|
1641 | isrelevant = lambda m: m in markerset | |
1642 |
|
1642 | |||
1643 | fm = ui.formatter('debugobsolete', opts) |
|
1643 | fm = ui.formatter('debugobsolete', opts) | |
1644 | for i, m in enumerate(markerstoiter): |
|
1644 | for i, m in enumerate(markerstoiter): | |
1645 | if not isrelevant(m): |
|
1645 | if not isrelevant(m): | |
1646 | # marker can be irrelevant when we're iterating over a set |
|
1646 | # marker can be irrelevant when we're iterating over a set | |
1647 | # of markers (markerstoiter) which is bigger than the set |
|
1647 | # of markers (markerstoiter) which is bigger than the set | |
1648 | # of markers we want to display (markers) |
|
1648 | # of markers we want to display (markers) | |
1649 | # this can happen if both --index and --rev options are |
|
1649 | # this can happen if both --index and --rev options are | |
1650 | # provided and thus we need to iterate over all of the markers |
|
1650 | # provided and thus we need to iterate over all of the markers | |
1651 | # to get the correct indices, but only display the ones that |
|
1651 | # to get the correct indices, but only display the ones that | |
1652 | # are relevant to the --rev value |

1652 | # are relevant to the --rev value | |
1653 | continue |
|
1653 | continue | |
1654 | fm.startitem() |
|
1654 | fm.startitem() | |
1655 | ind = i if opts.get('index') else None |
|
1655 | ind = i if opts.get('index') else None | |
1656 | cmdutil.showmarker(fm, m, index=ind) |
|
1656 | cmdutil.showmarker(fm, m, index=ind) | |
1657 | fm.end() |
|
1657 | fm.end() | |
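
A standalone sketch of the --index/--rev interplay handled in the loop above: indices must be computed over all markers, even though only the selected ones are printed (toy strings in place of marker objects):

    allmarkers = ['m0', 'm1', 'm2', 'm3']  # stand-in for obsutil.getmarkers(repo)
    selected = {'m1', 'm3'}                # stand-in for the --rev selection

    for i, m in enumerate(allmarkers):
        if m not in selected:
            continue                       # skipped, but i still advanced
        print(i, m)
    # 1 m1
    # 3 m3  -- indices match positions in the full marker list
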
1658 |
|
1658 | |||
1659 | @command('debugpathcomplete', |
|
1659 | @command('debugpathcomplete', | |
1660 | [('f', 'full', None, _('complete an entire path')), |
|
1660 | [('f', 'full', None, _('complete an entire path')), | |
1661 | ('n', 'normal', None, _('show only normal files')), |
|
1661 | ('n', 'normal', None, _('show only normal files')), | |
1662 | ('a', 'added', None, _('show only added files')), |
|
1662 | ('a', 'added', None, _('show only added files')), | |
1663 | ('r', 'removed', None, _('show only removed files'))], |
|
1663 | ('r', 'removed', None, _('show only removed files'))], | |
1664 | _('FILESPEC...')) |
|
1664 | _('FILESPEC...')) | |
1665 | def debugpathcomplete(ui, repo, *specs, **opts): |
|
1665 | def debugpathcomplete(ui, repo, *specs, **opts): | |
1666 | '''complete part or all of a tracked path |
|
1666 | '''complete part or all of a tracked path | |
1667 |
|
1667 | |||
1668 | This command supports shells that offer path name completion. It |
|
1668 | This command supports shells that offer path name completion. It | |
1669 | currently completes only files already known to the dirstate. |
|
1669 | currently completes only files already known to the dirstate. | |
1670 |
|
1670 | |||
1671 | Completion extends only to the next path segment unless |
|
1671 | Completion extends only to the next path segment unless | |
1672 | --full is specified, in which case entire paths are used.''' |
|
1672 | --full is specified, in which case entire paths are used.''' | |
1673 |
|
1673 | |||
1674 | def complete(path, acceptable): |
|
1674 | def complete(path, acceptable): | |
1675 | dirstate = repo.dirstate |
|
1675 | dirstate = repo.dirstate | |
1676 | spec = os.path.normpath(os.path.join(pycompat.getcwd(), path)) |
|
1676 | spec = os.path.normpath(os.path.join(pycompat.getcwd(), path)) | |
1677 | rootdir = repo.root + pycompat.ossep |
|
1677 | rootdir = repo.root + pycompat.ossep | |
1678 | if spec != repo.root and not spec.startswith(rootdir): |
|
1678 | if spec != repo.root and not spec.startswith(rootdir): | |
1679 | return [], [] |
|
1679 | return [], [] | |
1680 | if os.path.isdir(spec): |
|
1680 | if os.path.isdir(spec): | |
1681 | spec += '/' |
|
1681 | spec += '/' | |
1682 | spec = spec[len(rootdir):] |
|
1682 | spec = spec[len(rootdir):] | |
1683 | fixpaths = pycompat.ossep != '/' |
|
1683 | fixpaths = pycompat.ossep != '/' | |
1684 | if fixpaths: |
|
1684 | if fixpaths: | |
1685 | spec = spec.replace(pycompat.ossep, '/') |
|
1685 | spec = spec.replace(pycompat.ossep, '/') | |
1686 | speclen = len(spec) |
|
1686 | speclen = len(spec) | |
1687 | fullpaths = opts[r'full'] |
|
1687 | fullpaths = opts[r'full'] | |
1688 | files, dirs = set(), set() |
|
1688 | files, dirs = set(), set() | |
1689 | adddir, addfile = dirs.add, files.add |
|
1689 | adddir, addfile = dirs.add, files.add | |
1690 | for f, st in dirstate.iteritems(): |
|
1690 | for f, st in dirstate.iteritems(): | |
1691 | if f.startswith(spec) and st[0] in acceptable: |
|
1691 | if f.startswith(spec) and st[0] in acceptable: | |
1692 | if fixpaths: |
|
1692 | if fixpaths: | |
1693 | f = f.replace('/', pycompat.ossep) |
|
1693 | f = f.replace('/', pycompat.ossep) | |
1694 | if fullpaths: |
|
1694 | if fullpaths: | |
1695 | addfile(f) |
|
1695 | addfile(f) | |
1696 | continue |
|
1696 | continue | |
1697 | s = f.find(pycompat.ossep, speclen) |
|
1697 | s = f.find(pycompat.ossep, speclen) | |
1698 | if s >= 0: |
|
1698 | if s >= 0: | |
1699 | adddir(f[:s]) |
|
1699 | adddir(f[:s]) | |
1700 | else: |
|
1700 | else: | |
1701 | addfile(f) |
|
1701 | addfile(f) | |
1702 | return files, dirs |
|
1702 | return files, dirs | |
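
A standalone sketch of the next-segment truncation in complete() above, assuming a POSIX '/' separator and a toy file list in place of the dirstate:

    sep = '/'
    spec = 'src/'
    speclen = len(spec)
    files, dirs = set(), set()
    for f in ['src/main.py', 'src/util/helpers.py', 'README']:
        if not f.startswith(spec):
            continue
        s = f.find(sep, speclen)
        if s >= 0:
            dirs.add(f[:s])    # stop at the next path segment
        else:
            files.add(f)
    print(sorted(files | dirs))  # ['src/main.py', 'src/util']
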
1703 |
|
1703 | |||
1704 | acceptable = '' |
|
1704 | acceptable = '' | |
1705 | if opts[r'normal']: |
|
1705 | if opts[r'normal']: | |
1706 | acceptable += 'nm' |
|
1706 | acceptable += 'nm' | |
1707 | if opts[r'added']: |
|
1707 | if opts[r'added']: | |
1708 | acceptable += 'a' |
|
1708 | acceptable += 'a' | |
1709 | if opts[r'removed']: |
|
1709 | if opts[r'removed']: | |
1710 | acceptable += 'r' |
|
1710 | acceptable += 'r' | |
1711 | cwd = repo.getcwd() |
|
1711 | cwd = repo.getcwd() | |
1712 | if not specs: |
|
1712 | if not specs: | |
1713 | specs = ['.'] |
|
1713 | specs = ['.'] | |
1714 |
|
1714 | |||
1715 | files, dirs = set(), set() |
|
1715 | files, dirs = set(), set() | |
1716 | for spec in specs: |
|
1716 | for spec in specs: | |
1717 | f, d = complete(spec, acceptable or 'nmar') |
|
1717 | f, d = complete(spec, acceptable or 'nmar') | |
1718 | files.update(f) |
|
1718 | files.update(f) | |
1719 | dirs.update(d) |
|
1719 | dirs.update(d) | |
1720 | files.update(dirs) |
|
1720 | files.update(dirs) | |
1721 | ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files))) |
|
1721 | ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files))) | |
1722 | ui.write('\n') |
|
1722 | ui.write('\n') | |
1723 |
|
1723 | |||
1724 | @command('debugpeer', [], _('PATH'), norepo=True) |
|
1724 | @command('debugpeer', [], _('PATH'), norepo=True) | |
1725 | def debugpeer(ui, path): |
|
1725 | def debugpeer(ui, path): | |
1726 | """establish a connection to a peer repository""" |
|
1726 | """establish a connection to a peer repository""" | |
1727 | # Always enable peer request logging. Requires --debug to display |
|
1727 | # Always enable peer request logging. Requires --debug to display | |
1728 | # it, though. |

1728 | # it, though. | |
1729 | overrides = { |
|
1729 | overrides = { | |
1730 | ('devel', 'debug.peer-request'): True, |
|
1730 | ('devel', 'debug.peer-request'): True, | |
1731 | } |
|
1731 | } | |
1732 |
|
1732 | |||
1733 | with ui.configoverride(overrides): |
|
1733 | with ui.configoverride(overrides): | |
1734 | peer = hg.peer(ui, {}, path) |
|
1734 | peer = hg.peer(ui, {}, path) | |
1735 |
|
1735 | |||
1736 | local = peer.local() is not None |
|
1736 | local = peer.local() is not None | |
1737 | canpush = peer.canpush() |
|
1737 | canpush = peer.canpush() | |
1738 |
|
1738 | |||
1739 | ui.write(_('url: %s\n') % peer.url()) |
|
1739 | ui.write(_('url: %s\n') % peer.url()) | |
1740 | ui.write(_('local: %s\n') % (_('yes') if local else _('no'))) |
|
1740 | ui.write(_('local: %s\n') % (_('yes') if local else _('no'))) | |
1741 | ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no'))) |
|
1741 | ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no'))) | |
1742 |
|
1742 | |||
1743 | @command('debugpickmergetool', |
|
1743 | @command('debugpickmergetool', | |
1744 | [('r', 'rev', '', _('check for files in this revision'), _('REV')), |
|
1744 | [('r', 'rev', '', _('check for files in this revision'), _('REV')), | |
1745 | ('', 'changedelete', None, _('emulate merging change and delete')), |
|
1745 | ('', 'changedelete', None, _('emulate merging change and delete')), | |
1746 | ] + cmdutil.walkopts + cmdutil.mergetoolopts, |
|
1746 | ] + cmdutil.walkopts + cmdutil.mergetoolopts, | |
1747 | _('[PATTERN]...'), |
|
1747 | _('[PATTERN]...'), | |
1748 | inferrepo=True) |
|
1748 | inferrepo=True) | |
1749 | def debugpickmergetool(ui, repo, *pats, **opts): |
|
1749 | def debugpickmergetool(ui, repo, *pats, **opts): | |
1750 | """examine which merge tool is chosen for specified file |
|
1750 | """examine which merge tool is chosen for specified file | |
1751 |
|
1751 | |||
1752 | As described in :hg:`help merge-tools`, Mercurial examines |
|
1752 | As described in :hg:`help merge-tools`, Mercurial examines | |
1753 | the configurations below in this order to decide which merge tool |

1753 | the configurations below in this order to decide which merge tool | |
1754 | is chosen for the specified file. |

1754 | is chosen for the specified file. | |
1755 |
|
1755 | |||
1756 | 1. ``--tool`` option |
|
1756 | 1. ``--tool`` option | |
1757 | 2. ``HGMERGE`` environment variable |
|
1757 | 2. ``HGMERGE`` environment variable | |
1758 | 3. configurations in ``merge-patterns`` section |
|
1758 | 3. configurations in ``merge-patterns`` section | |
1759 | 4. configuration of ``ui.merge`` |
|
1759 | 4. configuration of ``ui.merge`` | |
1760 | 5. configurations in ``merge-tools`` section |
|
1760 | 5. configurations in ``merge-tools`` section | |
1761 | 6. ``hgmerge`` tool (for historical reasons only) |

1761 | 6. ``hgmerge`` tool (for historical reasons only) | |
1762 | 7. default tool for fallback (``:merge`` or ``:prompt``) |
|
1762 | 7. default tool for fallback (``:merge`` or ``:prompt``) | |
1763 |
|
1763 | |||
1764 | This command writes out the examination result in the style below:: |

1764 | This command writes out the examination result in the style below:: | |
1765 |
|
1765 | |||
1766 | FILE = MERGETOOL |
|
1766 | FILE = MERGETOOL | |
1767 |
|
1767 | |||
1768 | By default, all files known in the first parent context of the |
|
1768 | By default, all files known in the first parent context of the | |
1769 | working directory are examined. Use file patterns and/or -I/-X |
|
1769 | working directory are examined. Use file patterns and/or -I/-X | |
1770 | options to limit target files. -r/--rev is also useful to examine |
|
1770 | options to limit target files. -r/--rev is also useful to examine | |
1771 | files in another context without actually updating to it. |

1771 | files in another context without actually updating to it. | |
1772 |
|
1772 | |||
1773 | With --debug, this command shows warning messages while matching |
|
1773 | With --debug, this command shows warning messages while matching | |
1774 | against ``merge-patterns`` and so on, too. It is recommended to |
|
1774 | against ``merge-patterns`` and so on, too. It is recommended to | |
1775 | use this option with explicit file patterns and/or -I/-X options, |
|
1775 | use this option with explicit file patterns and/or -I/-X options, | |
1776 | because this option increases the amount of output per file according |

1776 | because this option increases the amount of output per file according | |
1777 | to configurations in hgrc. |
|
1777 | to configurations in hgrc. | |
1778 |
|
1778 | |||
1779 | With -v/--verbose, this command first shows the configurations |

1779 | With -v/--verbose, this command first shows the configurations | |
1780 | below (only if specified). |

1780 | below (only if specified). | |
1781 |
|
1781 | |||
1782 | - ``--tool`` option |
|
1782 | - ``--tool`` option | |
1783 | - ``HGMERGE`` environment variable |
|
1783 | - ``HGMERGE`` environment variable | |
1784 | - configuration of ``ui.merge`` |
|
1784 | - configuration of ``ui.merge`` | |
1785 |
|
1785 | |||
1786 | If a merge tool is chosen before matching against |

1786 | If a merge tool is chosen before matching against | |
1787 | ``merge-patterns``, this command can't show any helpful |

1787 | ``merge-patterns``, this command can't show any helpful | |
1788 | information, even with --debug. In such a case, the information |

1788 | information, even with --debug. In such a case, the information | |
1789 | above is useful to know why a merge tool is chosen. |

1789 | above is useful to know why a merge tool is chosen. | |
1790 | """ |
|
1790 | """ | |
1791 | opts = pycompat.byteskwargs(opts) |
|
1791 | opts = pycompat.byteskwargs(opts) | |
1792 | overrides = {} |
|
1792 | overrides = {} | |
1793 | if opts['tool']: |
|
1793 | if opts['tool']: | |
1794 | overrides[('ui', 'forcemerge')] = opts['tool'] |
|
1794 | overrides[('ui', 'forcemerge')] = opts['tool'] | |
1795 | ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool']))) |
|
1795 | ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool']))) | |
1796 |
|
1796 | |||
1797 | with ui.configoverride(overrides, 'debugmergepatterns'): |
|
1797 | with ui.configoverride(overrides, 'debugmergepatterns'): | |
1798 | hgmerge = encoding.environ.get("HGMERGE") |
|
1798 | hgmerge = encoding.environ.get("HGMERGE") | |
1799 | if hgmerge is not None: |
|
1799 | if hgmerge is not None: | |
1800 | ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge))) |
|
1800 | ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge))) | |
1801 | uimerge = ui.config("ui", "merge") |
|
1801 | uimerge = ui.config("ui", "merge") | |
1802 | if uimerge: |
|
1802 | if uimerge: | |
1803 | ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge))) |
|
1803 | ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge))) | |
1804 |
|
1804 | |||
1805 | ctx = scmutil.revsingle(repo, opts.get('rev')) |
|
1805 | ctx = scmutil.revsingle(repo, opts.get('rev')) | |
1806 | m = scmutil.match(ctx, pats, opts) |
|
1806 | m = scmutil.match(ctx, pats, opts) | |
1807 | changedelete = opts['changedelete'] |
|
1807 | changedelete = opts['changedelete'] | |
1808 | for path in ctx.walk(m): |
|
1808 | for path in ctx.walk(m): | |
1809 | fctx = ctx[path] |
|
1809 | fctx = ctx[path] | |
1810 | try: |
|
1810 | try: | |
1811 | if not ui.debugflag: |
|
1811 | if not ui.debugflag: | |
1812 | ui.pushbuffer(error=True) |
|
1812 | ui.pushbuffer(error=True) | |
1813 | tool, toolpath = filemerge._picktool(repo, ui, path, |
|
1813 | tool, toolpath = filemerge._picktool(repo, ui, path, | |
1814 | fctx.isbinary(), |
|
1814 | fctx.isbinary(), | |
1815 | 'l' in fctx.flags(), |
|
1815 | 'l' in fctx.flags(), | |
1816 | changedelete) |
|
1816 | changedelete) | |
1817 | finally: |
|
1817 | finally: | |
1818 | if not ui.debugflag: |
|
1818 | if not ui.debugflag: | |
1819 | ui.popbuffer() |
|
1819 | ui.popbuffer() | |
1820 | ui.write(('%s = %s\n') % (path, tool)) |
|
1820 | ui.write(('%s = %s\n') % (path, tool)) | |
1821 |
|
1821 | |||
1822 | @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True) |
|
1822 | @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True) | |
1823 | def debugpushkey(ui, repopath, namespace, *keyinfo, **opts): |
|
1823 | def debugpushkey(ui, repopath, namespace, *keyinfo, **opts): | |
1824 | '''access the pushkey key/value protocol |
|
1824 | '''access the pushkey key/value protocol | |
1825 |
|
1825 | |||
1826 | With two args, list the keys in the given namespace. |
|
1826 | With two args, list the keys in the given namespace. | |
1827 |
|
1827 | |||
1828 | With five args, set a key to new if it currently is set to old. |
|
1828 | With five args, set a key to new if it currently is set to old. | |
1829 | Reports success or failure. |
|
1829 | Reports success or failure. | |
1830 | ''' |
|
1830 | ''' | |
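
A standalone sketch of the compare-and-swap semantics described above ("set a key to new if it currently is set to old"), with a toy in-memory namespace rather than the wire protocol:

    store = {'bookmarks': {'feature': 'abc123'}}

    def pushkey(namespace, key, old, new):
        if store[namespace].get(key, '') != old:
            return False           # precondition failed: report failure
        store[namespace][key] = new
        return True                # report success

    print(pushkey('bookmarks', 'feature', 'abc123', 'def456'))  # True
    print(pushkey('bookmarks', 'feature', 'abc123', '999999'))  # False: stale old
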
1831 |
|
1831 | |||
1832 | target = hg.peer(ui, {}, repopath) |
|
1832 | target = hg.peer(ui, {}, repopath) | |
1833 | if keyinfo: |
|
1833 | if keyinfo: | |
1834 | key, old, new = keyinfo |
|
1834 | key, old, new = keyinfo | |
1835 | r = target.pushkey(namespace, key, old, new) |
|
1835 | with target.commandexecutor() as e: | |
|
1836 | r = e.callcommand('pushkey', { | |||
|
1837 | 'namespace': namespace, | |||
|
1838 | 'key': key, | |||
|
1839 | 'old': old, | |||
|
1840 | 'new': new, | |||
|
1841 | }).result() | |||
|
1842 | ||||
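
The new code above switches from a direct peer method call to the command-executor pattern: callcommand() returns a future and .result() blocks for the reply. A standalone sketch of that shape using the standard library, not Mercurial's peer API:

    from concurrent.futures import ThreadPoolExecutor

    def pushkey(namespace, key, old, new):
        return True  # pretend the remote accepted the update

    with ThreadPoolExecutor() as e:
        fut = e.submit(pushkey, 'bookmarks', 'feature', 'abc123', 'def456')
        r = fut.result()   # block until the command completes
    print(r)               # True
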
1836 | ui.status(pycompat.bytestr(r) + '\n') |
|
1843 | ui.status(pycompat.bytestr(r) + '\n') | |
1837 | return not r |
|
1844 | return not r | |
1838 | else: |
|
1845 | else: | |
1839 | for k, v in sorted(target.listkeys(namespace).iteritems()): |
|
1846 | for k, v in sorted(target.listkeys(namespace).iteritems()): | |
1840 | ui.write("%s\t%s\n" % (stringutil.escapestr(k), |
|
1847 | ui.write("%s\t%s\n" % (stringutil.escapestr(k), | |
1841 | stringutil.escapestr(v))) |
|
1848 | stringutil.escapestr(v))) | |
1842 |
|
1849 | |||
1843 | @command('debugpvec', [], _('A B')) |
|
1850 | @command('debugpvec', [], _('A B')) | |
1844 | def debugpvec(ui, repo, a, b=None): |
|
1851 | def debugpvec(ui, repo, a, b=None): | |
1845 | ca = scmutil.revsingle(repo, a) |
|
1852 | ca = scmutil.revsingle(repo, a) | |
1846 | cb = scmutil.revsingle(repo, b) |
|
1853 | cb = scmutil.revsingle(repo, b) | |
1847 | pa = pvec.ctxpvec(ca) |
|
1854 | pa = pvec.ctxpvec(ca) | |
1848 | pb = pvec.ctxpvec(cb) |
|
1855 | pb = pvec.ctxpvec(cb) | |
1849 | if pa == pb: |
|
1856 | if pa == pb: | |
1850 | rel = "=" |
|
1857 | rel = "=" | |
1851 | elif pa > pb: |
|
1858 | elif pa > pb: | |
1852 | rel = ">" |
|
1859 | rel = ">" | |
1853 | elif pa < pb: |
|
1860 | elif pa < pb: | |
1854 | rel = "<" |
|
1861 | rel = "<" | |
1855 | elif pa | pb: |
|
1862 | elif pa | pb: | |
1856 | rel = "|" |
|
1863 | rel = "|" | |
1857 | ui.write(_("a: %s\n") % pa) |
|
1864 | ui.write(_("a: %s\n") % pa) | |
1858 | ui.write(_("b: %s\n") % pb) |
|
1865 | ui.write(_("b: %s\n") % pb) | |
1859 | ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth)) |
|
1866 | ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth)) | |
1860 | ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") % |
|
1867 | ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") % | |
1861 | (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec), |
|
1868 | (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec), | |
1862 | pa.distance(pb), rel)) |
|
1869 | pa.distance(pb), rel)) | |
1863 |
|
1870 | |||
1864 | @command('debugrebuilddirstate|debugrebuildstate', |
|
1871 | @command('debugrebuilddirstate|debugrebuildstate', | |
1865 | [('r', 'rev', '', _('revision to rebuild to'), _('REV')), |
|
1872 | [('r', 'rev', '', _('revision to rebuild to'), _('REV')), | |
1866 | ('', 'minimal', None, _('only rebuild files that are inconsistent with ' |
|
1873 | ('', 'minimal', None, _('only rebuild files that are inconsistent with ' | |
1867 | 'the working copy parent')), |
|
1874 | 'the working copy parent')), | |
1868 | ], |
|
1875 | ], | |
1869 | _('[-r REV]')) |
|
1876 | _('[-r REV]')) | |
1870 | def debugrebuilddirstate(ui, repo, rev, **opts): |
|
1877 | def debugrebuilddirstate(ui, repo, rev, **opts): | |
1871 | """rebuild the dirstate as it would look like for the given revision |
|
1878 | """rebuild the dirstate as it would look like for the given revision | |
1872 |
|
1879 | |||
1873 | If no revision is specified, the first current parent will be used. |

1880 | If no revision is specified, the first current parent will be used. | |
1874 |
|
1881 | |||
1875 | The dirstate will be set to the files of the given revision. |
|
1882 | The dirstate will be set to the files of the given revision. | |
1876 | The actual working directory content or existing dirstate |
|
1883 | The actual working directory content or existing dirstate | |
1877 | information such as adds or removes is not considered. |
|
1884 | information such as adds or removes is not considered. | |
1878 |
|
1885 | |||
1879 | ``minimal`` will only rebuild the dirstate status for files that claim to be |
|
1886 | ``minimal`` will only rebuild the dirstate status for files that claim to be | |
1880 | tracked but are not in the parent manifest, or that exist in the parent |
|
1887 | tracked but are not in the parent manifest, or that exist in the parent | |
1881 | manifest but are not in the dirstate. It will not change adds, removes, or |
|
1888 | manifest but are not in the dirstate. It will not change adds, removes, or | |
1882 | modified files that are in the working copy parent. |
|
1889 | modified files that are in the working copy parent. | |
1883 |
|
1890 | |||
1884 | One use of this command is to make the next :hg:`status` invocation |
|
1891 | One use of this command is to make the next :hg:`status` invocation | |
1885 | check the actual file content. |
|
1892 | check the actual file content. | |
1886 | """ |
|
1893 | """ | |
1887 | ctx = scmutil.revsingle(repo, rev) |
|
1894 | ctx = scmutil.revsingle(repo, rev) | |
1888 | with repo.wlock(): |
|
1895 | with repo.wlock(): | |
1889 | dirstate = repo.dirstate |
|
1896 | dirstate = repo.dirstate | |
1890 | changedfiles = None |
|
1897 | changedfiles = None | |
1891 | # See command doc for what minimal does. |
|
1898 | # See command doc for what minimal does. | |
1892 | if opts.get(r'minimal'): |
|
1899 | if opts.get(r'minimal'): | |
1893 | manifestfiles = set(ctx.manifest().keys()) |
|
1900 | manifestfiles = set(ctx.manifest().keys()) | |
1894 | dirstatefiles = set(dirstate) |
|
1901 | dirstatefiles = set(dirstate) | |
1895 | manifestonly = manifestfiles - dirstatefiles |
|
1902 | manifestonly = manifestfiles - dirstatefiles | |
1896 | dsonly = dirstatefiles - manifestfiles |
|
1903 | dsonly = dirstatefiles - manifestfiles | |
1897 | dsnotadded = set(f for f in dsonly if dirstate[f] != 'a') |
|
1904 | dsnotadded = set(f for f in dsonly if dirstate[f] != 'a') | |
1898 | changedfiles = manifestonly | dsnotadded |
|
1905 | changedfiles = manifestonly | dsnotadded | |
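
A worked example of the --minimal selection above, with toy sets in place of the manifest and dirstate:

    manifestfiles = {'a.txt', 'b.txt'}
    dirstatefiles = {'b.txt', 'c.txt', 'd.txt'}
    status = {'b.txt': 'n', 'c.txt': 'a', 'd.txt': 'n'}  # 'a' = added

    manifestonly = manifestfiles - dirstatefiles             # {'a.txt'}
    dsonly = dirstatefiles - manifestfiles                   # {'c.txt', 'd.txt'}
    dsnotadded = set(f for f in dsonly if status[f] != 'a')  # {'d.txt'}
    print(sorted(manifestonly | dsnotadded))                 # ['a.txt', 'd.txt']
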
1899 |
|
1906 | |||
1900 | dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles) |
|
1907 | dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles) | |
1901 |
|
1908 | |||
1902 | @command('debugrebuildfncache', [], '') |
|
1909 | @command('debugrebuildfncache', [], '') | |
1903 | def debugrebuildfncache(ui, repo): |
|
1910 | def debugrebuildfncache(ui, repo): | |
1904 | """rebuild the fncache file""" |
|
1911 | """rebuild the fncache file""" | |
1905 | repair.rebuildfncache(ui, repo) |
|
1912 | repair.rebuildfncache(ui, repo) | |
1906 |
|
1913 | |||
1907 | @command('debugrename', |
|
1914 | @command('debugrename', | |
1908 | [('r', 'rev', '', _('revision to debug'), _('REV'))], |
|
1915 | [('r', 'rev', '', _('revision to debug'), _('REV'))], | |
1909 | _('[-r REV] FILE')) |
|
1916 | _('[-r REV] FILE')) | |
1910 | def debugrename(ui, repo, file1, *pats, **opts): |
|
1917 | def debugrename(ui, repo, file1, *pats, **opts): | |
1911 | """dump rename information""" |
|
1918 | """dump rename information""" | |
1912 |
|
1919 | |||
1913 | opts = pycompat.byteskwargs(opts) |
|
1920 | opts = pycompat.byteskwargs(opts) | |
1914 | ctx = scmutil.revsingle(repo, opts.get('rev')) |
|
1921 | ctx = scmutil.revsingle(repo, opts.get('rev')) | |
1915 | m = scmutil.match(ctx, (file1,) + pats, opts) |
|
1922 | m = scmutil.match(ctx, (file1,) + pats, opts) | |
1916 | for abs in ctx.walk(m): |
|
1923 | for abs in ctx.walk(m): | |
1917 | fctx = ctx[abs] |
|
1924 | fctx = ctx[abs] | |
1918 | o = fctx.filelog().renamed(fctx.filenode()) |
|
1925 | o = fctx.filelog().renamed(fctx.filenode()) | |
1919 | rel = m.rel(abs) |
|
1926 | rel = m.rel(abs) | |
1920 | if o: |
|
1927 | if o: | |
1921 | ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1]))) |
|
1928 | ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1]))) | |
1922 | else: |
|
1929 | else: | |
1923 | ui.write(_("%s not renamed\n") % rel) |
|
1930 | ui.write(_("%s not renamed\n") % rel) | |
1924 |
|
1931 | |||
1925 | @command('debugrevlog', cmdutil.debugrevlogopts + |
|
1932 | @command('debugrevlog', cmdutil.debugrevlogopts + | |
1926 | [('d', 'dump', False, _('dump index data'))], |
|
1933 | [('d', 'dump', False, _('dump index data'))], | |
1927 | _('-c|-m|FILE'), |
|
1934 | _('-c|-m|FILE'), | |
1928 | optionalrepo=True) |
|
1935 | optionalrepo=True) | |
1929 | def debugrevlog(ui, repo, file_=None, **opts): |
|
1936 | def debugrevlog(ui, repo, file_=None, **opts): | |
1930 | """show data and statistics about a revlog""" |
|
1937 | """show data and statistics about a revlog""" | |
1931 | opts = pycompat.byteskwargs(opts) |
|
1938 | opts = pycompat.byteskwargs(opts) | |
1932 | r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts) |
|
1939 | r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts) | |
1933 |
|
1940 | |||
1934 | if opts.get("dump"): |
|
1941 | if opts.get("dump"): | |
1935 | numrevs = len(r) |
|
1942 | numrevs = len(r) | |
1936 | ui.write(("# rev p1rev p2rev start end deltastart base p1 p2" |
|
1943 | ui.write(("# rev p1rev p2rev start end deltastart base p1 p2" | |
1937 | " rawsize totalsize compression heads chainlen\n")) |
|
1944 | " rawsize totalsize compression heads chainlen\n")) | |
1938 | ts = 0 |
|
1945 | ts = 0 | |
1939 | heads = set() |
|
1946 | heads = set() | |
1940 |
|
1947 | |||
1941 | for rev in xrange(numrevs): |
|
1948 | for rev in xrange(numrevs): | |
1942 | dbase = r.deltaparent(rev) |
|
1949 | dbase = r.deltaparent(rev) | |
1943 | if dbase == -1: |
|
1950 | if dbase == -1: | |
1944 | dbase = rev |
|
1951 | dbase = rev | |
1945 | cbase = r.chainbase(rev) |
|
1952 | cbase = r.chainbase(rev) | |
1946 | clen = r.chainlen(rev) |
|
1953 | clen = r.chainlen(rev) | |
1947 | p1, p2 = r.parentrevs(rev) |
|
1954 | p1, p2 = r.parentrevs(rev) | |
1948 | rs = r.rawsize(rev) |
|
1955 | rs = r.rawsize(rev) | |
1949 | ts = ts + rs |
|
1956 | ts = ts + rs | |
1950 | heads -= set(r.parentrevs(rev)) |
|
1957 | heads -= set(r.parentrevs(rev)) | |
1951 | heads.add(rev) |
|
1958 | heads.add(rev) | |
1952 | try: |
|
1959 | try: | |
1953 | compression = ts / r.end(rev) |
|
1960 | compression = ts / r.end(rev) | |
1954 | except ZeroDivisionError: |
|
1961 | except ZeroDivisionError: | |
1955 | compression = 0 |
|
1962 | compression = 0 | |
1956 | ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d " |
|
1963 | ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d " | |
1957 | "%11d %5d %8d\n" % |
|
1964 | "%11d %5d %8d\n" % | |
1958 | (rev, p1, p2, r.start(rev), r.end(rev), |
|
1965 | (rev, p1, p2, r.start(rev), r.end(rev), | |
1959 | r.start(dbase), r.start(cbase), |
|
1966 | r.start(dbase), r.start(cbase), | |
1960 | r.start(p1), r.start(p2), |
|
1967 | r.start(p1), r.start(p2), | |
1961 | rs, ts, compression, len(heads), clen)) |
|
1968 | rs, ts, compression, len(heads), clen)) | |
1962 | return 0 |
|
1969 | return 0 | |
1963 |
|
1970 | |||
1964 | v = r.version |
|
1971 | v = r.version | |
1965 | format = v & 0xFFFF |
|
1972 | format = v & 0xFFFF | |
1966 | flags = [] |
|
1973 | flags = [] | |
1967 | gdelta = False |
|
1974 | gdelta = False | |
1968 | if v & revlog.FLAG_INLINE_DATA: |
|
1975 | if v & revlog.FLAG_INLINE_DATA: | |
1969 | flags.append('inline') |
|
1976 | flags.append('inline') | |
1970 | if v & revlog.FLAG_GENERALDELTA: |
|
1977 | if v & revlog.FLAG_GENERALDELTA: | |
1971 | gdelta = True |
|
1978 | gdelta = True | |
1972 | flags.append('generaldelta') |
|
1979 | flags.append('generaldelta') | |
1973 | if not flags: |
|
1980 | if not flags: | |
1974 | flags = ['(none)'] |
|
1981 | flags = ['(none)'] | |
1975 |
|
1982 | |||
1976 | nummerges = 0 |
|
1983 | nummerges = 0 | |
1977 | numfull = 0 |
|
1984 | numfull = 0 | |
1978 | numprev = 0 |
|
1985 | numprev = 0 | |
1979 | nump1 = 0 |
|
1986 | nump1 = 0 | |
1980 | nump2 = 0 |
|
1987 | nump2 = 0 | |
1981 | numother = 0 |
|
1988 | numother = 0 | |
1982 | nump1prev = 0 |
|
1989 | nump1prev = 0 | |
1983 | nump2prev = 0 |
|
1990 | nump2prev = 0 | |
1984 | chainlengths = [] |
|
1991 | chainlengths = [] | |
1985 | chainbases = [] |
|
1992 | chainbases = [] | |
1986 | chainspans = [] |
|
1993 | chainspans = [] | |
1987 |
|
1994 | |||
1988 | datasize = [None, 0, 0] |
|
1995 | datasize = [None, 0, 0] | |
1989 | fullsize = [None, 0, 0] |
|
1996 | fullsize = [None, 0, 0] | |
1990 | deltasize = [None, 0, 0] |
|
1997 | deltasize = [None, 0, 0] | |
1991 | chunktypecounts = {} |
|
1998 | chunktypecounts = {} | |
1992 | chunktypesizes = {} |
|
1999 | chunktypesizes = {} | |
1993 |
|
2000 | |||
1994 | def addsize(size, l): |
|
2001 | def addsize(size, l): | |
1995 | if l[0] is None or size < l[0]: |
|
2002 | if l[0] is None or size < l[0]: | |
1996 | l[0] = size |
|
2003 | l[0] = size | |
1997 | if size > l[1]: |
|
2004 | if size > l[1]: | |
1998 | l[1] = size |
|
2005 | l[1] = size | |
1999 | l[2] += size |
|
2006 | l[2] += size | |
2000 |
|
2007 | |||
2001 | numrevs = len(r) |
|
2008 | numrevs = len(r) | |
2002 | for rev in xrange(numrevs): |
|
2009 | for rev in xrange(numrevs): | |
2003 | p1, p2 = r.parentrevs(rev) |
|
2010 | p1, p2 = r.parentrevs(rev) | |
2004 | delta = r.deltaparent(rev) |
|
2011 | delta = r.deltaparent(rev) | |
2005 | if format > 0: |
|
2012 | if format > 0: | |
2006 | addsize(r.rawsize(rev), datasize) |
|
2013 | addsize(r.rawsize(rev), datasize) | |
2007 | if p2 != nullrev: |
|
2014 | if p2 != nullrev: | |
2008 | nummerges += 1 |
|
2015 | nummerges += 1 | |
2009 | size = r.length(rev) |
|
2016 | size = r.length(rev) | |
2010 | if delta == nullrev: |
|
2017 | if delta == nullrev: | |
2011 | chainlengths.append(0) |
|
2018 | chainlengths.append(0) | |
2012 | chainbases.append(r.start(rev)) |
|
2019 | chainbases.append(r.start(rev)) | |
2013 | chainspans.append(size) |
|
2020 | chainspans.append(size) | |
2014 | numfull += 1 |
|
2021 | numfull += 1 | |
2015 | addsize(size, fullsize) |
|
2022 | addsize(size, fullsize) | |
2016 | else: |
|
2023 | else: | |
2017 | chainlengths.append(chainlengths[delta] + 1) |
|
2024 | chainlengths.append(chainlengths[delta] + 1) | |
2018 | baseaddr = chainbases[delta] |
|
2025 | baseaddr = chainbases[delta] | |
2019 | revaddr = r.start(rev) |
|
2026 | revaddr = r.start(rev) | |
2020 | chainbases.append(baseaddr) |
|
2027 | chainbases.append(baseaddr) | |
2021 | chainspans.append((revaddr - baseaddr) + size) |
|
2028 | chainspans.append((revaddr - baseaddr) + size) | |
2022 | addsize(size, deltasize) |
|
2029 | addsize(size, deltasize) | |
2023 | if delta == rev - 1: |
|
2030 | if delta == rev - 1: | |
2024 | numprev += 1 |
|
2031 | numprev += 1 | |
2025 | if delta == p1: |
|
2032 | if delta == p1: | |
2026 | nump1prev += 1 |
|
2033 | nump1prev += 1 | |
2027 | elif delta == p2: |
|
2034 | elif delta == p2: | |
2028 | nump2prev += 1 |
|
2035 | nump2prev += 1 | |
2029 | elif delta == p1: |
|
2036 | elif delta == p1: | |
2030 | nump1 += 1 |
|
2037 | nump1 += 1 | |
2031 | elif delta == p2: |
|
2038 | elif delta == p2: | |
2032 | nump2 += 1 |
|
2039 | nump2 += 1 | |
2033 | elif delta != nullrev: |
|
2040 | elif delta != nullrev: | |
2034 | numother += 1 |
|
2041 | numother += 1 | |
2035 |
|
2042 | |||
2036 | # Obtain data on the raw chunks in the revlog. |
|
2043 | # Obtain data on the raw chunks in the revlog. | |
2037 | segment = r._getsegmentforrevs(rev, rev)[1] |
|
2044 | segment = r._getsegmentforrevs(rev, rev)[1] | |
2038 | if segment: |
|
2045 | if segment: | |
2039 | chunktype = bytes(segment[0:1]) |
|
2046 | chunktype = bytes(segment[0:1]) | |
2040 | else: |
|
2047 | else: | |
2041 | chunktype = 'empty' |
|
2048 | chunktype = 'empty' | |
2042 |
|
2049 | |||
2043 | if chunktype not in chunktypecounts: |
|
2050 | if chunktype not in chunktypecounts: | |
2044 | chunktypecounts[chunktype] = 0 |
|
2051 | chunktypecounts[chunktype] = 0 | |
2045 | chunktypesizes[chunktype] = 0 |
|
2052 | chunktypesizes[chunktype] = 0 | |
2046 |
|
2053 | |||
2047 | chunktypecounts[chunktype] += 1 |
|
2054 | chunktypecounts[chunktype] += 1 | |
2048 | chunktypesizes[chunktype] += size |
|
2055 | chunktypesizes[chunktype] += size | |
2049 |
|
2056 | |||
2050 | # Adjust size min value for empty cases |
|
2057 | # Adjust size min value for empty cases | |
2051 | for size in (datasize, fullsize, deltasize): |
|
2058 | for size in (datasize, fullsize, deltasize): | |
2052 | if size[0] is None: |
|
2059 | if size[0] is None: | |
2053 | size[0] = 0 |
|
2060 | size[0] = 0 | |
2054 |
|
2061 | |||
2055 | numdeltas = numrevs - numfull |
|
2062 | numdeltas = numrevs - numfull | |
2056 | numoprev = numprev - nump1prev - nump2prev |
|
2063 | numoprev = numprev - nump1prev - nump2prev | |
2057 | totalrawsize = datasize[2] |
|
2064 | totalrawsize = datasize[2] | |
2058 | datasize[2] /= numrevs |
|
2065 | datasize[2] /= numrevs | |
2059 | fulltotal = fullsize[2] |
|
2066 | fulltotal = fullsize[2] | |
2060 | fullsize[2] /= numfull |
|
2067 | fullsize[2] /= numfull | |
2061 | deltatotal = deltasize[2] |
|
2068 | deltatotal = deltasize[2] | |
2062 | if numrevs - numfull > 0: |
|
2069 | if numrevs - numfull > 0: | |
2063 | deltasize[2] /= numrevs - numfull |
|
2070 | deltasize[2] /= numrevs - numfull | |
2064 | totalsize = fulltotal + deltatotal |
|
2071 | totalsize = fulltotal + deltatotal | |
2065 | avgchainlen = sum(chainlengths) / numrevs |
|
2072 | avgchainlen = sum(chainlengths) / numrevs | |
2066 | maxchainlen = max(chainlengths) |
|
2073 | maxchainlen = max(chainlengths) | |
2067 | maxchainspan = max(chainspans) |
|
2074 | maxchainspan = max(chainspans) | |
2068 | compratio = 1 |
|
2075 | compratio = 1 | |
2069 | if totalsize: |
|
2076 | if totalsize: | |
2070 | compratio = totalrawsize / totalsize |
|
2077 | compratio = totalrawsize / totalsize | |
2071 |
|
2078 | |||
2072 | basedfmtstr = '%%%dd\n' |
|
2079 | basedfmtstr = '%%%dd\n' | |
2073 | basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n' |
|
2080 | basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n' | |
2074 |
|
2081 | |||
2075 | def dfmtstr(max): |
|
2082 | def dfmtstr(max): | |
2076 | return basedfmtstr % len(str(max)) |
|
2083 | return basedfmtstr % len(str(max)) | |
2077 | def pcfmtstr(max, padding=0): |
|
2084 | def pcfmtstr(max, padding=0): | |
2078 | return basepcfmtstr % (len(str(max)), ' ' * padding) |
|
2085 | return basepcfmtstr % (len(str(max)), ' ' * padding) | |
2079 |
|
2086 | |||
2080 | def pcfmt(value, total): |
|
2087 | def pcfmt(value, total): | |
2081 | if total: |
|
2088 | if total: | |
2082 | return (value, 100 * float(value) / total) |
|
2089 | return (value, 100 * float(value) / total) | |
2083 | else: |
|
2090 | else: | |
2084 | return value, 100.0 |
|
2091 | return value, 100.0 | |
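
A worked example of the width derivation above: '%%%dd' bakes len(str(max)) into a right-aligned integer format, and pcfmtstr adds the percentage tail:

    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))

    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    print(repr(dfmtstr(12345)))      # '%5d\n'
    print(repr(pcfmtstr(12345)))     # '%5d (%5.2f%%)\n'
    print(pcfmtstr(12345) % (42, 3.5), end='')  # '   42 ( 3.50%)'
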
2085 |
|
2092 | |||
2086 | ui.write(('format : %d\n') % format) |
|
2093 | ui.write(('format : %d\n') % format) | |
2087 | ui.write(('flags : %s\n') % ', '.join(flags)) |
|
2094 | ui.write(('flags : %s\n') % ', '.join(flags)) | |
2088 |
|
2095 | |||
2089 | ui.write('\n') |
|
2096 | ui.write('\n') | |
2090 | fmt = pcfmtstr(totalsize) |
|
2097 | fmt = pcfmtstr(totalsize) | |
2091 | fmt2 = dfmtstr(totalsize) |
|
2098 | fmt2 = dfmtstr(totalsize) | |
2092 | ui.write(('revisions : ') + fmt2 % numrevs) |
|
2099 | ui.write(('revisions : ') + fmt2 % numrevs) | |
2093 | ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs)) |
|
2100 | ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs)) | |
2094 | ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs)) |
|
2101 | ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs)) | |
2095 | ui.write(('revisions : ') + fmt2 % numrevs) |
|
2102 | ui.write(('revisions : ') + fmt2 % numrevs) | |
2096 | ui.write((' full : ') + fmt % pcfmt(numfull, numrevs)) |
|
2103 | ui.write((' full : ') + fmt % pcfmt(numfull, numrevs)) | |
2097 | ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs)) |
|
2104 | ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs)) | |
2098 | ui.write(('revision size : ') + fmt2 % totalsize) |
|
2105 | ui.write(('revision size : ') + fmt2 % totalsize) | |
2099 | ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize)) |
|
2106 | ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize)) | |
2100 | ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize)) |
|
2107 | ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize)) | |
2101 |
|
2108 | |||
2102 | def fmtchunktype(chunktype): |
|
2109 | def fmtchunktype(chunktype): | |
2103 | if chunktype == 'empty': |
|
2110 | if chunktype == 'empty': | |
2104 | return ' %s : ' % chunktype |
|
2111 | return ' %s : ' % chunktype | |
2105 | elif chunktype in pycompat.bytestr(string.ascii_letters): |
|
2112 | elif chunktype in pycompat.bytestr(string.ascii_letters): | |
2106 | return ' 0x%s (%s) : ' % (hex(chunktype), chunktype) |
|
2113 | return ' 0x%s (%s) : ' % (hex(chunktype), chunktype) | |
2107 | else: |
|
2114 | else: | |
2108 | return ' 0x%s : ' % hex(chunktype) |
|
2115 | return ' 0x%s : ' % hex(chunktype) | |
2109 |
|
2116 | |||
2110 | ui.write('\n') |
|
2117 | ui.write('\n') | |
2111 | ui.write(('chunks : ') + fmt2 % numrevs) |
|
2118 | ui.write(('chunks : ') + fmt2 % numrevs) | |
2112 | for chunktype in sorted(chunktypecounts): |
|
2119 | for chunktype in sorted(chunktypecounts): | |
2113 | ui.write(fmtchunktype(chunktype)) |
|
2120 | ui.write(fmtchunktype(chunktype)) | |
2114 | ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs)) |
|
2121 | ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs)) | |
2115 | ui.write(('chunks size : ') + fmt2 % totalsize) |
|
2122 | ui.write(('chunks size : ') + fmt2 % totalsize) | |
2116 | for chunktype in sorted(chunktypecounts): |
|
2123 | for chunktype in sorted(chunktypecounts): | |
2117 | ui.write(fmtchunktype(chunktype)) |
|
2124 | ui.write(fmtchunktype(chunktype)) | |
2118 | ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize)) |
|
2125 | ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize)) | |
2119 |
|
2126 | |||
2120 | ui.write('\n') |
|
2127 | ui.write('\n') | |
2121 | fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio)) |
|
2128 | fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio)) | |
2122 | ui.write(('avg chain length : ') + fmt % avgchainlen) |
|
2129 | ui.write(('avg chain length : ') + fmt % avgchainlen) | |
2123 | ui.write(('max chain length : ') + fmt % maxchainlen) |
|
2130 | ui.write(('max chain length : ') + fmt % maxchainlen) | |
2124 | ui.write(('max chain reach : ') + fmt % maxchainspan) |
|
2131 | ui.write(('max chain reach : ') + fmt % maxchainspan) | |
2125 | ui.write(('compression ratio : ') + fmt % compratio) |
|
2132 | ui.write(('compression ratio : ') + fmt % compratio) | |
2126 |
|
2133 | |||
2127 | if format > 0: |
|
2134 | if format > 0: | |
2128 | ui.write('\n') |
|
2135 | ui.write('\n') | |
2129 | ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n') |
|
2136 | ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n') | |
2130 | % tuple(datasize)) |
|
2137 | % tuple(datasize)) | |
2131 | ui.write(('full revision size (min/max/avg) : %d / %d / %d\n') |
|
2138 | ui.write(('full revision size (min/max/avg) : %d / %d / %d\n') | |
2132 | % tuple(fullsize)) |
|
2139 | % tuple(fullsize)) | |
2133 | ui.write(('delta size (min/max/avg) : %d / %d / %d\n') |
|
2140 | ui.write(('delta size (min/max/avg) : %d / %d / %d\n') | |
2134 | % tuple(deltasize)) |
|
2141 | % tuple(deltasize)) | |
2135 |
|
2142 | |||
2136 | if numdeltas > 0: |
|
2143 | if numdeltas > 0: | |
2137 | ui.write('\n') |
|
2144 | ui.write('\n') | |
2138 | fmt = pcfmtstr(numdeltas) |
|
2145 | fmt = pcfmtstr(numdeltas) | |
2139 | fmt2 = pcfmtstr(numdeltas, 4) |
|
2146 | fmt2 = pcfmtstr(numdeltas, 4) | |
2140 | ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas)) |
|
2147 | ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas)) | |
2141 | if numprev > 0: |
|
2148 | if numprev > 0: | |
2142 | ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev, |
|
2149 | ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev, | |
2143 | numprev)) |
|
2150 | numprev)) | |
2144 | ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev, |
|
2151 | ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev, | |
2145 | numprev)) |
|
2152 | numprev)) | |
2146 | ui.write((' other : ') + fmt2 % pcfmt(numoprev, |
|
2153 | ui.write((' other : ') + fmt2 % pcfmt(numoprev, | |
2147 | numprev)) |
|
2154 | numprev)) | |
2148 | if gdelta: |
|
2155 | if gdelta: | |
2149 | ui.write(('deltas against p1 : ') |
|
2156 | ui.write(('deltas against p1 : ') | |
2150 | + fmt % pcfmt(nump1, numdeltas)) |
|
2157 | + fmt % pcfmt(nump1, numdeltas)) | |
2151 | ui.write(('deltas against p2 : ') |
|
2158 | ui.write(('deltas against p2 : ') | |
2152 | + fmt % pcfmt(nump2, numdeltas)) |
|
2159 | + fmt % pcfmt(nump2, numdeltas)) | |
2153 | ui.write(('deltas against other : ') + fmt % pcfmt(numother, |
|
2160 | ui.write(('deltas against other : ') + fmt % pcfmt(numother, | |
2154 | numdeltas)) |
|
2161 | numdeltas)) | |
2155 |
|
2162 | |||
2156 | @command('debugrevspec', |
|
2163 | @command('debugrevspec', | |
2157 | [('', 'optimize', None, |
|
2164 | [('', 'optimize', None, | |
2158 | _('print parsed tree after optimizing (DEPRECATED)')), |
|
2165 | _('print parsed tree after optimizing (DEPRECATED)')), | |
2159 | ('', 'show-revs', True, _('print list of result revisions (default)')), |
|
2166 | ('', 'show-revs', True, _('print list of result revisions (default)')), | |
2160 | ('s', 'show-set', None, _('print internal representation of result set')), |
|
2167 | ('s', 'show-set', None, _('print internal representation of result set')), | |
2161 | ('p', 'show-stage', [], |
|
2168 | ('p', 'show-stage', [], | |
2162 | _('print parsed tree at the given stage'), _('NAME')), |
|
2169 | _('print parsed tree at the given stage'), _('NAME')), | |
2163 | ('', 'no-optimized', False, _('evaluate tree without optimization')), |
|
2170 | ('', 'no-optimized', False, _('evaluate tree without optimization')), | |
2164 | ('', 'verify-optimized', False, _('verify optimized result')), |
|
2171 | ('', 'verify-optimized', False, _('verify optimized result')), | |
2165 | ], |
|
2172 | ], | |
2166 | ('REVSPEC')) |
|
2173 | ('REVSPEC')) | |
2167 | def debugrevspec(ui, repo, expr, **opts): |
|
2174 | def debugrevspec(ui, repo, expr, **opts): | |
2168 | """parse and apply a revision specification |
|
2175 | """parse and apply a revision specification | |
2169 |
|
2176 | |||
2170 | Use -p/--show-stage option to print the parsed tree at the given stages. |
|
2177 | Use -p/--show-stage option to print the parsed tree at the given stages. | |
2171 | Use -p all to print the tree at every stage. |

2178 | Use -p all to print the tree at every stage. | |
2172 |
|
2179 | |||
2173 | Use --no-show-revs option with -s or -p to print only the set |
|
2180 | Use --no-show-revs option with -s or -p to print only the set | |
2174 | representation or the parsed tree respectively. |
|
2181 | representation or the parsed tree respectively. | |
2175 |
|
2182 | |||
2176 | Use --verify-optimized to compare the optimized result with the unoptimized |
|
2183 | Use --verify-optimized to compare the optimized result with the unoptimized | |
2177 | one. Returns 1 if the optimized result differs. |
|
2184 | one. Returns 1 if the optimized result differs. | |
2178 | """ |
|
2185 | """ | |
2179 | opts = pycompat.byteskwargs(opts) |
|
2186 | opts = pycompat.byteskwargs(opts) | |
2180 | aliases = ui.configitems('revsetalias') |
|
2187 | aliases = ui.configitems('revsetalias') | |
2181 | stages = [ |
|
2188 | stages = [ | |
2182 | ('parsed', lambda tree: tree), |
|
2189 | ('parsed', lambda tree: tree), | |
2183 | ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases, |
|
2190 | ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases, | |
2184 | ui.warn)), |
|
2191 | ui.warn)), | |
2185 | ('concatenated', revsetlang.foldconcat), |
|
2192 | ('concatenated', revsetlang.foldconcat), | |
2186 | ('analyzed', revsetlang.analyze), |
|
2193 | ('analyzed', revsetlang.analyze), | |
2187 | ('optimized', revsetlang.optimize), |
|
2194 | ('optimized', revsetlang.optimize), | |
2188 | ] |
|
2195 | ] | |
2189 | if opts['no_optimized']: |
|
2196 | if opts['no_optimized']: | |
2190 | stages = stages[:-1] |
|
2197 | stages = stages[:-1] | |
2191 | if opts['verify_optimized'] and opts['no_optimized']: |
|
2198 | if opts['verify_optimized'] and opts['no_optimized']: | |
2192 | raise error.Abort(_('cannot use --verify-optimized with ' |
|
2199 | raise error.Abort(_('cannot use --verify-optimized with ' | |
2193 | '--no-optimized')) |
|
2200 | '--no-optimized')) | |
2194 | stagenames = set(n for n, f in stages) |
|
2201 | stagenames = set(n for n, f in stages) | |
2195 |
|
2202 | |||
2196 | showalways = set() |
|
2203 | showalways = set() | |
2197 | showchanged = set() |
|
2204 | showchanged = set() | |
2198 | if ui.verbose and not opts['show_stage']: |
|
2205 | if ui.verbose and not opts['show_stage']: | |
2199 | # show parsed tree by --verbose (deprecated) |
|
2206 | # show parsed tree by --verbose (deprecated) | |
2200 | showalways.add('parsed') |
|
2207 | showalways.add('parsed') | |
2201 | showchanged.update(['expanded', 'concatenated']) |
|
2208 | showchanged.update(['expanded', 'concatenated']) | |
2202 | if opts['optimize']: |
|
2209 | if opts['optimize']: | |
2203 | showalways.add('optimized') |
|
2210 | showalways.add('optimized') | |
2204 | if opts['show_stage'] and opts['optimize']: |
|
2211 | if opts['show_stage'] and opts['optimize']: | |
2205 | raise error.Abort(_('cannot use --optimize with --show-stage')) |
|
2212 | raise error.Abort(_('cannot use --optimize with --show-stage')) | |
2206 | if opts['show_stage'] == ['all']: |
|
2213 | if opts['show_stage'] == ['all']: | |
2207 | showalways.update(stagenames) |
|
2214 | showalways.update(stagenames) | |
2208 | else: |
|
2215 | else: | |
2209 | for n in opts['show_stage']: |
|
2216 | for n in opts['show_stage']: | |
2210 | if n not in stagenames: |
|
2217 | if n not in stagenames: | |
2211 | raise error.Abort(_('invalid stage name: %s') % n) |
|
2218 | raise error.Abort(_('invalid stage name: %s') % n) | |
2212 | showalways.update(opts['show_stage']) |
|
2219 | showalways.update(opts['show_stage']) | |
2213 |
|
2220 | |||
2214 | treebystage = {} |
|
2221 | treebystage = {} | |
2215 | printedtree = None |
|
2222 | printedtree = None | |
2216 | tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo)) |
|
2223 | tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo)) | |
2217 | for n, f in stages: |
|
2224 | for n, f in stages: | |
2218 | treebystage[n] = tree = f(tree) |
|
2225 | treebystage[n] = tree = f(tree) | |
2219 | if n in showalways or (n in showchanged and tree != printedtree): |
|
2226 | if n in showalways or (n in showchanged and tree != printedtree): | |
2220 | if opts['show_stage'] or n != 'parsed': |
|
2227 | if opts['show_stage'] or n != 'parsed': | |
2221 | ui.write(("* %s:\n") % n) |
|
2228 | ui.write(("* %s:\n") % n) | |
2222 | ui.write(revsetlang.prettyformat(tree), "\n") |
|
2229 | ui.write(revsetlang.prettyformat(tree), "\n") | |
2223 | printedtree = tree |
|
2230 | printedtree = tree | |
2224 |
|
2231 | |||
2225 | if opts['verify_optimized']: |
|
2232 | if opts['verify_optimized']: | |
2226 | arevs = revset.makematcher(treebystage['analyzed'])(repo) |
|
2233 | arevs = revset.makematcher(treebystage['analyzed'])(repo) | |
2227 | brevs = revset.makematcher(treebystage['optimized'])(repo) |
|
2234 | brevs = revset.makematcher(treebystage['optimized'])(repo) | |
2228 | if opts['show_set'] or (opts['show_set'] is None and ui.verbose): |
|
2235 | if opts['show_set'] or (opts['show_set'] is None and ui.verbose): | |
2229 | ui.write(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n") |
|
2236 | ui.write(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n") | |
2230 | ui.write(("* optimized set:\n"), smartset.prettyformat(brevs), "\n") |
|
2237 | ui.write(("* optimized set:\n"), smartset.prettyformat(brevs), "\n") | |
2231 | arevs = list(arevs) |
|
2238 | arevs = list(arevs) | |
2232 | brevs = list(brevs) |
|
2239 | brevs = list(brevs) | |
2233 | if arevs == brevs: |
|
2240 | if arevs == brevs: | |
2234 | return 0 |
|
2241 | return 0 | |
2235 | ui.write(('--- analyzed\n'), label='diff.file_a') |
|
2242 | ui.write(('--- analyzed\n'), label='diff.file_a') | |
2236 | ui.write(('+++ optimized\n'), label='diff.file_b') |
|
2243 | ui.write(('+++ optimized\n'), label='diff.file_b') | |
2237 | sm = difflib.SequenceMatcher(None, arevs, brevs) |
|
2244 | sm = difflib.SequenceMatcher(None, arevs, brevs) | |
2238 | for tag, alo, ahi, blo, bhi in sm.get_opcodes(): |
|
2245 | for tag, alo, ahi, blo, bhi in sm.get_opcodes(): | |
2239 | if tag in ('delete', 'replace'): |
|
2246 | if tag in ('delete', 'replace'): | |
2240 | for c in arevs[alo:ahi]: |
|
2247 | for c in arevs[alo:ahi]: | |
2241 | ui.write('-%s\n' % c, label='diff.deleted') |
|
2248 | ui.write('-%s\n' % c, label='diff.deleted') | |
2242 | if tag in ('insert', 'replace'): |
|
2249 | if tag in ('insert', 'replace'): | |
2243 | for c in brevs[blo:bhi]: |
|
2250 | for c in brevs[blo:bhi]: | |
2244 | ui.write('+%s\n' % c, label='diff.inserted') |
|
2251 | ui.write('+%s\n' % c, label='diff.inserted') | |
2245 | if tag == 'equal': |
|
2252 | if tag == 'equal': | |
2246 | for c in arevs[alo:ahi]: |
|
2253 | for c in arevs[alo:ahi]: | |
2247 | ui.write(' %s\n' % c) |
|
2254 | ui.write(' %s\n' % c) | |
2248 | return 1 |
|
2255 | return 1 | |
2249 |
|
2256 | |||
2250 | func = revset.makematcher(tree) |
|
2257 | func = revset.makematcher(tree) | |
2251 | revs = func(repo) |
|
2258 | revs = func(repo) | |
2252 | if opts['show_set'] or (opts['show_set'] is None and ui.verbose): |
|
2259 | if opts['show_set'] or (opts['show_set'] is None and ui.verbose): | |
2253 | ui.write(("* set:\n"), smartset.prettyformat(revs), "\n") |
|
2260 | ui.write(("* set:\n"), smartset.prettyformat(revs), "\n") | |
2254 | if not opts['show_revs']: |
|
2261 | if not opts['show_revs']: | |
2255 | return |
|
2262 | return | |
2256 | for c in revs: |
|
2263 | for c in revs: | |
2257 | ui.write("%d\n" % c) |
|
2264 | ui.write("%d\n" % c) | |
2258 |
|
2265 | |||
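# Editor's illustrative sketch (not part of the original module): the stage
# loop in debugrevspec() above threads a single tree through successive
# transforms while also recording every intermediate result. The
# `_demo_stagefold` name and its toy transforms are assumptions made for
# demonstration only; nothing calls this helper.
def _demo_stagefold():
    stages = [
        ('parsed', lambda t: t),
        ('analyzed', lambda t: t + 1),   # stand-in for revsetlang.analyze
        ('optimized', lambda t: t * 2),  # stand-in for revsetlang.optimize
    ]
    treebystage = {}
    tree = 1
    for n, f in stages:
        # chained assignment: record this stage's result and advance the tree
        treebystage[n] = tree = f(tree)
    return treebystage  # {'parsed': 1, 'analyzed': 2, 'optimized': 4}
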
@command('debugserve', [
    ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
    ('', 'logiofd', '', _('file descriptor to log server I/O to')),
    ('', 'logiofile', '', _('file to log server I/O to')),
], '')
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround for the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts['sshstdio']:
        raise error.Abort(_('only --sshstdio is currently supported'))

    logfh = None

    if opts['logiofd'] and opts['logiofile']:
        raise error.Abort(_('cannot use both --logiofd and --logiofile'))

    if opts['logiofd']:
        # Line buffered because output is line based.
        logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
    elif opts['logiofile']:
        logfh = open(opts['logiofile'], 'ab', 1)

    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()

@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """

    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, 'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)

@command('debugssl', [], '[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    if not pycompat.iswindows:
        raise error.Abort(_('certificate chain building is only possible on '
                            'Windows'))

    if not source:
        if not repo:
            raise error.Abort(_("there is no Mercurial repository here, and no "
                                "server specified"))
        source = "default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)
    addr = None

    defaultport = {'https': 443, 'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_("malformed port number in URL"))
    else:
        raise error.Abort(_("only https and ssh connections are supported"))

    from . import win32

    s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
                        cert_reqs=ssl.CERT_NONE, ca_certs=None)

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_('checking the certificate chain for %s\n') % url.host)

        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_('certificate chain is incomplete, updating... '))

            if not win32.checkcertificatechain(cert):
                ui.status(_('failed.\n'))
            else:
                ui.status(_('done.\n'))
        else:
            ui.status(_('full certificate chain is available\n'))
    finally:
        s.close()

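# Editor's illustrative sketch (not part of the original module): how the
# default-port fallback in debugssl() above resolves a (host, port) address
# tuple. This uses the stdlib URL splitter instead of the module's util.url,
# so it is only an approximation; `_demo_defaultport` is a hypothetical name.
def _demo_defaultport(source='https://example.com'):
    try:
        from urllib.parse import urlsplit  # Python 3
    except ImportError:
        from urlparse import urlsplit      # Python 2
    u = urlsplit(source)
    defaultport = {'https': 443, 'ssh': 22}
    if u.scheme not in defaultport:
        raise ValueError('only https and ssh connections are supported')
    # an explicit port wins; otherwise fall back to the scheme's default
    return (u.hostname, int(u.port or defaultport[u.scheme]))
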
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    ctx = scmutil.revsingle(repo, rev, None)
    for k, v in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % k)
        ui.write((' source   %s\n') % v[0])
        ui.write((' revision %s\n') % v[1])

@command('debugsuccessorssets',
    [('', 'closest', False, _('return closest successors sets only'))],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only, unless the --closest
    option is given.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors is called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n' % ctx2str(ctx))
        for succsset in obsutil.successorssets(repo, ctx.node(),
                                               closest=opts[r'closest'],
                                               cache=cache):
            if succsset:
                ui.write('    ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(' ')
                    ui.write(node2str(node))
            ui.write('\n')

@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts[r'rev']:
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts[r'rev'])

    props = {}
    for d in opts[r'define']:
        try:
            k, v = (e.strip() for e in d.split('=', 1))
            if not k or k == 'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s') % d)

    if ui.verbose:
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    if revs is None:
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        ui.write(t.renderdefault(props))
    else:
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()

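# Editor's illustrative sketch (not part of the original module): the
# -D/--define parsing in debugtemplate() above splits each definition on the
# first '=' only and rejects empty keys and the reserved 'ui' key. A toy
# standalone version of that logic (`_demo_parsedefine` is a hypothetical
# name):
def _demo_parsedefine(d='foo=bar=baz'):
    try:
        # a missing '=' makes the unpack raise ValueError, like a bad key
        k, v = (e.strip() for e in d.split('=', 1))
        if not k or k == 'ui':
            raise ValueError
    except ValueError:
        raise ValueError('malformed keyword definition: %s' % d)
    return k, v  # ('foo', 'bar=baz'): later '=' signs stay in the value
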
@command('debuguigetpass', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguigetpass(ui, prompt=''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    ui.write(('response: %s\n') % r)

@command('debuguiprompt', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguiprompt(ui, prompt=''):
    """show plain prompt"""
    r = ui.prompt(prompt)
    ui.write(('response: %s\n') % r)

@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    with repo.wlock(), repo.lock():
        repo.updatecaches(full=True)

@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.
    """
    return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)

@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    ui.write(('matcher: %r\n' % m))
    items = list(repo[None].walk(m))
    if not items:
        return
    f = lambda fn: fn
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        f = lambda fn: util.normpath(fn)
    fmt = 'f  %%-%ds  %%-%ds  %%s' % (
        max([len(abs) for abs in items]),
        max([len(m.rel(abs)) for abs in items]))
    for abs in items:
        line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
        ui.write("%s\n" % line.rstrip())

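# Editor's illustrative sketch (not part of the original module): the doubled
# '%%' in debugwalk()'s format string survives the first interpolation, so the
# column widths get baked in first and the row values substituted second.
# `_demo_widthfmt` is a hypothetical name for demonstration only.
def _demo_widthfmt():
    fmt = 'f  %%-%ds  %%-%ds  %%s' % (7, 5)   # -> 'f  %-7s  %-5s  %s'
    return fmt % ('a/b.txt', 'b.txt', 'exact')
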
@command('debugwhyunstable', [], _('REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        dnodes = ''
        if entry.get('divergentnodes'):
            dnodes = ' '.join('%s (%s)' % (ctx.hex(), ctx.phasestr())
                              for ctx in entry['divergentnodes']) + ' '
        ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
                                    entry['reason'], entry['node']))

@command('debugwireargs',
    [('', 'three', '', 'three'),
     ('', 'four', '', 'four'),
     ('', 'five', '', 'five'),
    ] + cmdutil.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    for opt in cmdutil.remoteopts:
        del opts[opt[1]]
    args = {}
    for k, v in opts.iteritems():
        if v:
            args[k] = v
    args = pycompat.strkwargs(args)
    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **args)
    res2 = repo.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)

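# Editor's illustrative sketch (not part of the original module):
# debugwireargs() above converts its bytes-keyed opts to native-str keys with
# pycompat.strkwargs() so **-expansion works on Python 3, mirroring the
# byteskwargs() conversion done on entry. The same round trip in isolation
# (`_demo_kwargsroundtrip` is a hypothetical name):
def _demo_kwargsroundtrip():
    args = {b'three': b'vthree'}         # bytes keys, as used internally
    native = pycompat.strkwargs(args)    # str keys, safe to **-expand
    return pycompat.byteskwargs(native)  # back to bytes keys
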
def _parsewirelangblocks(fh):
    activeaction = None
    blocklines = []

    for line in fh:
        line = line.rstrip()
        if not line:
            continue

        if line.startswith(b'#'):
            continue

        if not line.startswith(' '):
            # New block. Flush previous one.
            if activeaction:
                yield activeaction, blocklines

            activeaction = line
            blocklines = []
            continue

        # Else we start with an indent.

        if not activeaction:
            raise error.Abort(_('indented line outside of block'))

        blocklines.append(line)

    # Flush last block.
    if activeaction:
        yield activeaction, blocklines

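# Editor's illustrative sketch (not part of the original module; assumes the
# Python 2 bytes-as-str semantics this file targets): _parsewirelangblocks()
# groups each unindented action line with its indented payload lines, skipping
# comments and blank lines. `_demo_parsewirelangblocks` is a hypothetical
# name; nothing calls it.
def _demo_parsewirelangblocks():
    import io
    fh = io.BytesIO(b'# a comment\n'
                    b'command listkeys\n'
                    b'    namespace bookmarks\n')
    return list(_parsewirelangblocks(fh))
    # -> [('command listkeys', ['    namespace bookmarks'])]
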
@command('debugwireproto',
    [
        ('', 'localssh', False, _('start an SSH server for this repo')),
        ('', 'peer', '', _('construct a specific version of the peer')),
        ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
    ] + cmdutil.remoteopts,
    _('[PATH]'),
    optionalrepo=True)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is read from stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

      command listkeys
          namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit *Unified Frame-Based Protocol* frames by using
    special syntax.

    A frame is composed of a type, flags, and a payload. These can be parsed
    from a string of the form:

       <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
    opts = pycompat.byteskwargs(opts)

    if opts['localssh'] and not repo:
        raise error.Abort(_('--localssh requires a repository'))

    if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
        raise error.Abort(_('invalid value for --peer'),
                          hint=_('valid values are "raw", "http2", "ssh1", '
                                 'and "ssh2"'))

    if path and opts['localssh']:
        raise error.Abort(_('cannot specify --localssh with an explicit '
                            'path'))

    if ui.interactive():
        ui.write(_('(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts['localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            '-R', repo.root,
            'debugserve', '--sshstdio',
        ]
        proc = subprocess.Popen(args, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                bufsize=0)

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts['peer'] == 'raw':
            stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
                                               logdata=True)
            stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
                                                logdata=True)
            stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
                                                logdata=True)

        # --localssh also implies the peer connection settings.

        url = 'ssh://localserver'
        autoreadstderr = not opts['noreadstderr']

        if opts['peer'] == 'ssh1':
            ui.write(_('creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'ssh2':
            ui.write(_('creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
                                     None, autoreadstderr=autoreadstderr)
        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_('creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
                                    autoreadstderr=autoreadstderr)

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = util.url(path)
        if u.scheme != 'http':
            raise error.Abort(_('only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            r'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update({
                r'loggingfh': ui,
                r'loggingname': b's',
                r'loggingopts': {
                    r'logdata': True,
                    r'logdataapis': False,
                },
            })

        if ui.debugflag:
            openerargs[r'loggingopts'][r'logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts['peer'] == 'raw':
            openerargs[r'sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts['peer'] == 'http2':
            ui.write(_('creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            with ui.configoverride({
                ('experimental', 'httppeer.advertise-v2'): True}):
                peer = httppeer.makepeer(ui, path, opener=opener)

            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(_('could not instantiate HTTP peer for '
                                    'wire protocol version 2'),
                                  hint=_('the server may not have the feature '
                                         'enabled or is not allowing this '
                                         'client version'))

        elif opts['peer'] == 'raw':
            ui.write(_('using raw connection to peer\n'))
            peer = None
        elif opts['peer']:
            raise error.Abort(_('--peer %s not supported with HTTP peers') %
2931 | opts['peer']) |
|
2938 | opts['peer']) | |
2932 | else: |
|
2939 | else: | |
2933 | peer = httppeer.makepeer(ui, path, opener=opener) |
|
2940 | peer = httppeer.makepeer(ui, path, opener=opener) | |
2934 |
|
2941 | |||
2935 | # We /could/ populate stdin/stdout with sock.makefile()... |
|
2942 | # We /could/ populate stdin/stdout with sock.makefile()... | |
2936 | else: |
|
2943 | else: | |
2937 | raise error.Abort(_('unsupported connection configuration')) |
|
2944 | raise error.Abort(_('unsupported connection configuration')) | |
2938 |
|
2945 | |||
2939 | batchedcommands = None |
|
2946 | batchedcommands = None | |
2940 |
|
2947 | |||
2941 | # Now perform actions based on the parsed wire language instructions. |
|
2948 | # Now perform actions based on the parsed wire language instructions. | |
2942 | for action, lines in blocks: |
|
2949 | for action, lines in blocks: | |
2943 | if action in ('raw', 'raw+'): |
|
2950 | if action in ('raw', 'raw+'): | |
2944 | if not stdin: |
|
2951 | if not stdin: | |
2945 | raise error.Abort(_('cannot call raw/raw+ on this peer')) |
|
2952 | raise error.Abort(_('cannot call raw/raw+ on this peer')) | |
2946 |
|
2953 | |||
2947 | # Concatenate the data together. |
|
2954 | # Concatenate the data together. | |
2948 | data = ''.join(l.lstrip() for l in lines) |
|
2955 | data = ''.join(l.lstrip() for l in lines) | |
2949 | data = stringutil.unescapestr(data) |
|
2956 | data = stringutil.unescapestr(data) | |
2950 | stdin.write(data) |
|
2957 | stdin.write(data) | |
2951 |
|
2958 | |||
2952 | if action == 'raw+': |
|
2959 | if action == 'raw+': | |
2953 | stdin.flush() |
|
2960 | stdin.flush() | |
2954 | elif action == 'flush': |
|
2961 | elif action == 'flush': | |
2955 | if not stdin: |
|
2962 | if not stdin: | |
2956 | raise error.Abort(_('cannot call flush on this peer')) |
|
2963 | raise error.Abort(_('cannot call flush on this peer')) | |
2957 | stdin.flush() |
|
2964 | stdin.flush() | |
2958 | elif action.startswith('command'): |
|
2965 | elif action.startswith('command'): | |
2959 | if not peer: |
|
2966 | if not peer: | |
2960 | raise error.Abort(_('cannot send commands unless peer instance ' |
|
2967 | raise error.Abort(_('cannot send commands unless peer instance ' | |
2961 | 'is available')) |
|
2968 | 'is available')) | |
2962 |
|
2969 | |||
2963 | command = action.split(' ', 1)[1] |
|
2970 | command = action.split(' ', 1)[1] | |
2964 |
|
2971 | |||
2965 | args = {} |
|
2972 | args = {} | |
2966 | for line in lines: |
|
2973 | for line in lines: | |
2967 | # We need to allow empty values. |
|
2974 | # We need to allow empty values. | |
2968 | fields = line.lstrip().split(' ', 1) |
|
2975 | fields = line.lstrip().split(' ', 1) | |
2969 | if len(fields) == 1: |
|
2976 | if len(fields) == 1: | |
2970 | key = fields[0] |
|
2977 | key = fields[0] | |
2971 | value = '' |
|
2978 | value = '' | |
2972 | else: |
|
2979 | else: | |
2973 | key, value = fields |
|
2980 | key, value = fields | |
2974 |
|
2981 | |||
2975 | if value.startswith('eval:'): |
|
2982 | if value.startswith('eval:'): | |
2976 | value = stringutil.evalpythonliteral(value[5:]) |
|
2983 | value = stringutil.evalpythonliteral(value[5:]) | |
2977 | else: |
|
2984 | else: | |
2978 | value = stringutil.unescapestr(value) |
|
2985 | value = stringutil.unescapestr(value) | |
2979 |
|
2986 | |||
2980 | args[key] = value |
|
2987 | args[key] = value | |
2981 |
|
2988 | |||
2982 | if batchedcommands is not None: |
|
2989 | if batchedcommands is not None: | |
2983 | batchedcommands.append((command, args)) |
|
2990 | batchedcommands.append((command, args)) | |
2984 | continue |
|
2991 | continue | |
2985 |
|
2992 | |||
2986 | ui.status(_('sending %s command\n') % command) |
|
2993 | ui.status(_('sending %s command\n') % command) | |
2987 |
|
2994 | |||
2988 | if 'PUSHFILE' in args: |
|
2995 | if 'PUSHFILE' in args: | |
2989 | with open(args['PUSHFILE'], r'rb') as fh: |
|
2996 | with open(args['PUSHFILE'], r'rb') as fh: | |
2990 | del args['PUSHFILE'] |
|
2997 | del args['PUSHFILE'] | |
2991 | res, output = peer._callpush(command, fh, |
|
2998 | res, output = peer._callpush(command, fh, | |
2992 | **pycompat.strkwargs(args)) |
|
2999 | **pycompat.strkwargs(args)) | |
2993 | ui.status(_('result: %s\n') % stringutil.escapestr(res)) |
|
3000 | ui.status(_('result: %s\n') % stringutil.escapestr(res)) | |
2994 | ui.status(_('remote output: %s\n') % |
|
3001 | ui.status(_('remote output: %s\n') % | |
2995 | stringutil.escapestr(output)) |
|
3002 | stringutil.escapestr(output)) | |
2996 | else: |
|
3003 | else: | |
2997 | res = peer._call(command, **pycompat.strkwargs(args)) |
|
3004 | res = peer._call(command, **pycompat.strkwargs(args)) | |
2998 | ui.status(_('response: %s\n') % stringutil.pprint(res)) |
|
3005 | ui.status(_('response: %s\n') % stringutil.pprint(res)) | |
2999 |
|
3006 | |||
3000 | elif action == 'batchbegin': |
|
3007 | elif action == 'batchbegin': | |
3001 | if batchedcommands is not None: |
|
3008 | if batchedcommands is not None: | |
3002 | raise error.Abort(_('nested batchbegin not allowed')) |
|
3009 | raise error.Abort(_('nested batchbegin not allowed')) | |
3003 |
|
3010 | |||
3004 | batchedcommands = [] |
|
3011 | batchedcommands = [] | |
3005 | elif action == 'batchsubmit': |
|
3012 | elif action == 'batchsubmit': | |
3006 | # There is a batching API we could go through. But it would be |
|
3013 | # There is a batching API we could go through. But it would be | |
3007 | # difficult to normalize requests into function calls. It is easier |
|
3014 | # difficult to normalize requests into function calls. It is easier | |
3008 | # to bypass this layer and normalize to commands + args. |
|
3015 | # to bypass this layer and normalize to commands + args. | |
3009 | ui.status(_('sending batch with %d sub-commands\n') % |
|
3016 | ui.status(_('sending batch with %d sub-commands\n') % | |
3010 | len(batchedcommands)) |
|
3017 | len(batchedcommands)) | |
3011 | for i, chunk in enumerate(peer._submitbatch(batchedcommands)): |
|
3018 | for i, chunk in enumerate(peer._submitbatch(batchedcommands)): | |
3012 | ui.status(_('response #%d: %s\n') % |
|
3019 | ui.status(_('response #%d: %s\n') % | |
3013 | (i, stringutil.escapestr(chunk))) |
|
3020 | (i, stringutil.escapestr(chunk))) | |
3014 |
|
3021 | |||
3015 | batchedcommands = None |
|
3022 | batchedcommands = None | |
3016 |
|
3023 | |||
3017 | elif action.startswith('httprequest '): |
|
3024 | elif action.startswith('httprequest '): | |
3018 | if not opener: |
|
3025 | if not opener: | |
3019 | raise error.Abort(_('cannot use httprequest without an HTTP ' |
|
3026 | raise error.Abort(_('cannot use httprequest without an HTTP ' | |
3020 | 'peer')) |
|
3027 | 'peer')) | |
3021 |
|
3028 | |||
3022 | request = action.split(' ', 2) |
|
3029 | request = action.split(' ', 2) | |
3023 | if len(request) != 3: |
|
3030 | if len(request) != 3: | |
3024 | raise error.Abort(_('invalid httprequest: expected format is ' |
|
3031 | raise error.Abort(_('invalid httprequest: expected format is ' | |
3025 | '"httprequest <method> <path>')) |
|
3032 | '"httprequest <method> <path>')) | |
3026 |
|
3033 | |||
3027 | method, httppath = request[1:] |
|
3034 | method, httppath = request[1:] | |
3028 | headers = {} |
|
3035 | headers = {} | |
3029 | body = None |
|
3036 | body = None | |
3030 | frames = [] |
|
3037 | frames = [] | |
3031 | for line in lines: |
|
3038 | for line in lines: | |
3032 | line = line.lstrip() |
|
3039 | line = line.lstrip() | |
3033 | m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line) |
|
3040 | m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line) | |
3034 | if m: |
|
3041 | if m: | |
3035 | headers[m.group(1)] = m.group(2) |
|
3042 | headers[m.group(1)] = m.group(2) | |
3036 | continue |
|
3043 | continue | |
3037 |
|
3044 | |||
3038 | if line.startswith(b'BODYFILE '): |
|
3045 | if line.startswith(b'BODYFILE '): | |
3039 | with open(line.split(b' ', 1)[1], 'rb') as fh: |

3046 | with open(line.split(b' ', 1)[1], 'rb') as fh: | |
3040 | body = fh.read() |
|
3047 | body = fh.read() | |
3041 | elif line.startswith(b'frame '): |
|
3048 | elif line.startswith(b'frame '): | |
3042 | frame = wireprotoframing.makeframefromhumanstring( |
|
3049 | frame = wireprotoframing.makeframefromhumanstring( | |
3043 | line[len(b'frame '):]) |
|
3050 | line[len(b'frame '):]) | |
3044 |
|
3051 | |||
3045 | frames.append(frame) |
|
3052 | frames.append(frame) | |
3046 | else: |
|
3053 | else: | |
3047 | raise error.Abort(_('unknown argument to httprequest: %s') % |
|
3054 | raise error.Abort(_('unknown argument to httprequest: %s') % | |
3048 | line) |
|
3055 | line) | |
3049 |
|
3056 | |||
3050 | url = path + httppath |
|
3057 | url = path + httppath | |
3051 |
|
3058 | |||
3052 | if frames: |
|
3059 | if frames: | |
3053 | body = b''.join(bytes(f) for f in frames) |
|
3060 | body = b''.join(bytes(f) for f in frames) | |
3054 |
|
3061 | |||
3055 | req = urlmod.urlreq.request(pycompat.strurl(url), body, headers) |
|
3062 | req = urlmod.urlreq.request(pycompat.strurl(url), body, headers) | |
3056 |
|
3063 | |||
3057 | # urllib.Request insists on using has_data() as a proxy for |
|
3064 | # urllib.Request insists on using has_data() as a proxy for | |
3058 | # determining the request method. Override that to use our |
|
3065 | # determining the request method. Override that to use our | |
3059 | # explicitly requested method. |
|
3066 | # explicitly requested method. | |
3060 | req.get_method = lambda: method |
|
3067 | req.get_method = lambda: method | |
3061 |
|
3068 | |||
3062 | try: |
|
3069 | try: | |
3063 | res = opener.open(req) |
|
3070 | res = opener.open(req) | |
3064 | body = res.read() |
|
3071 | body = res.read() | |
3065 | except util.urlerr.urlerror as e: |
|
3072 | except util.urlerr.urlerror as e: | |
3066 | e.read() |
|
3073 | e.read() | |
3067 | continue |
|
3074 | continue | |
3068 |
|
3075 | |||
3069 | if res.headers.get('Content-Type') == 'application/mercurial-cbor': |
|
3076 | if res.headers.get('Content-Type') == 'application/mercurial-cbor': | |
3070 | ui.write(_('cbor> %s\n') % stringutil.pprint(cbor.loads(body))) |
|
3077 | ui.write(_('cbor> %s\n') % stringutil.pprint(cbor.loads(body))) | |
3071 |
|
3078 | |||
3072 | elif action == 'close': |
|
3079 | elif action == 'close': | |
3073 | peer.close() |
|
3080 | peer.close() | |
3074 | elif action == 'readavailable': |
|
3081 | elif action == 'readavailable': | |
3075 | if not stdout or not stderr: |
|
3082 | if not stdout or not stderr: | |
3076 | raise error.Abort(_('readavailable not available on this peer')) |
|
3083 | raise error.Abort(_('readavailable not available on this peer')) | |
3077 |
|
3084 | |||
3078 | stdin.close() |
|
3085 | stdin.close() | |
3079 | stdout.read() |
|
3086 | stdout.read() | |
3080 | stderr.read() |
|
3087 | stderr.read() | |
3081 |
|
3088 | |||
3082 | elif action == 'readline': |
|
3089 | elif action == 'readline': | |
3083 | if not stdout: |
|
3090 | if not stdout: | |
3084 | raise error.Abort(_('readline not available on this peer')) |
|
3091 | raise error.Abort(_('readline not available on this peer')) | |
3085 | stdout.readline() |
|
3092 | stdout.readline() | |
3086 | elif action == 'ereadline': |
|
3093 | elif action == 'ereadline': | |
3087 | if not stderr: |
|
3094 | if not stderr: | |
3088 | raise error.Abort(_('ereadline not available on this peer')) |
|
3095 | raise error.Abort(_('ereadline not available on this peer')) | |
3089 | stderr.readline() |
|
3096 | stderr.readline() | |
3090 | elif action.startswith('read '): |
|
3097 | elif action.startswith('read '): | |
3091 | count = int(action.split(' ', 1)[1]) |
|
3098 | count = int(action.split(' ', 1)[1]) | |
3092 | if not stdout: |
|
3099 | if not stdout: | |
3093 | raise error.Abort(_('read not available on this peer')) |
|
3100 | raise error.Abort(_('read not available on this peer')) | |
3094 | stdout.read(count) |
|
3101 | stdout.read(count) | |
3095 | elif action.startswith('eread '): |
|
3102 | elif action.startswith('eread '): | |
3096 | count = int(action.split(' ', 1)[1]) |
|
3103 | count = int(action.split(' ', 1)[1]) | |
3097 | if not stderr: |
|
3104 | if not stderr: | |
3098 | raise error.Abort(_('eread not available on this peer')) |
|
3105 | raise error.Abort(_('eread not available on this peer')) | |
3099 | stderr.read(count) |
|
3106 | stderr.read(count) | |
3100 | else: |
|
3107 | else: | |
3101 | raise error.Abort(_('unknown action: %s') % action) |
|
3108 | raise error.Abort(_('unknown action: %s') % action) | |
3102 |
|
3109 | |||
3103 | if batchedcommands is not None: |
|
3110 | if batchedcommands is not None: | |
3104 | raise error.Abort(_('unclosed "batchbegin" request')) |
|
3111 | raise error.Abort(_('unclosed "batchbegin" request')) | |
3105 |
|
3112 | |||
3106 | if peer: |
|
3113 | if peer: | |
3107 | peer.close() |
|
3114 | peer.close() | |
3108 |
|
3115 | |||
3109 | if proc: |
|
3116 | if proc: | |
3110 | proc.kill() |
|
3117 | proc.kill() |
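
Taken together, the branches above define the small line-oriented language that hg debugwireproto consumes: one action per line (raw/raw+, flush, command NAME, batchbegin/batchsubmit, httprequest METHOD PATH, close, and the read*/eread* family), with indented continuation lines carrying arguments. A minimal input sketch, assuming a peer reachable via --localssh (command and namespace names illustrative):

    $ hg debugwireproto --localssh << EOF
    > command listkeys
    >     namespace bookmarks
    > batchbegin
    > command heads
    > command listkeys
    >     namespace phases
    > batchsubmit
    > EOF

Each command seen between batchbegin and batchsubmit is queued in batchedcommands and sent as a single batch, matching the handling above; argument values prefixed with "eval:" are parsed as Python literals.
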
@@ -1,2391 +1,2404 b'' | |||||
1 | # exchange.py - utility to exchange data between repos. |
|
1 | # exchange.py - utility to exchange data between repos. | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import collections |
|
10 | import collections | |
11 | import errno |
|
11 | import errno | |
12 | import hashlib |
|
12 | import hashlib | |
13 |
|
13 | |||
14 | from .i18n import _ |
|
14 | from .i18n import _ | |
15 | from .node import ( |
|
15 | from .node import ( | |
16 | bin, |
|
16 | bin, | |
17 | hex, |
|
17 | hex, | |
18 | nullid, |
|
18 | nullid, | |
19 | ) |
|
19 | ) | |
20 | from .thirdparty import ( |
|
20 | from .thirdparty import ( | |
21 | attr, |
|
21 | attr, | |
22 | ) |
|
22 | ) | |
23 | from . import ( |
|
23 | from . import ( | |
24 | bookmarks as bookmod, |
|
24 | bookmarks as bookmod, | |
25 | bundle2, |
|
25 | bundle2, | |
26 | changegroup, |
|
26 | changegroup, | |
27 | discovery, |
|
27 | discovery, | |
28 | error, |
|
28 | error, | |
29 | lock as lockmod, |
|
29 | lock as lockmod, | |
30 | logexchange, |
|
30 | logexchange, | |
31 | obsolete, |
|
31 | obsolete, | |
32 | phases, |
|
32 | phases, | |
33 | pushkey, |
|
33 | pushkey, | |
34 | pycompat, |
|
34 | pycompat, | |
35 | scmutil, |
|
35 | scmutil, | |
36 | sslutil, |
|
36 | sslutil, | |
37 | streamclone, |
|
37 | streamclone, | |
38 | url as urlmod, |
|
38 | url as urlmod, | |
39 | util, |
|
39 | util, | |
40 | ) |
|
40 | ) | |
41 | from .utils import ( |
|
41 | from .utils import ( | |
42 | stringutil, |
|
42 | stringutil, | |
43 | ) |
|
43 | ) | |
44 |
|
44 | |||
45 | urlerr = util.urlerr |
|
45 | urlerr = util.urlerr | |
46 | urlreq = util.urlreq |
|
46 | urlreq = util.urlreq | |
47 |
|
47 | |||
48 | # Maps bundle version human names to changegroup versions. |
|
48 | # Maps bundle version human names to changegroup versions. | |
49 | _bundlespeccgversions = {'v1': '01', |
|
49 | _bundlespeccgversions = {'v1': '01', | |
50 | 'v2': '02', |
|
50 | 'v2': '02', | |
51 | 'packed1': 's1', |
|
51 | 'packed1': 's1', | |
52 | 'bundle2': '02', #legacy |
|
52 | 'bundle2': '02', #legacy | |
53 | } |
|
53 | } | |
54 |
|
54 | |||
55 | # Maps bundle version with content opts to choose which part to bundle |
|
55 | # Maps bundle version with content opts to choose which part to bundle | |
56 | _bundlespeccontentopts = { |
|
56 | _bundlespeccontentopts = { | |
57 | 'v1': { |
|
57 | 'v1': { | |
58 | 'changegroup': True, |
|
58 | 'changegroup': True, | |
59 | 'cg.version': '01', |
|
59 | 'cg.version': '01', | |
60 | 'obsolescence': False, |
|
60 | 'obsolescence': False, | |
61 | 'phases': False, |
|
61 | 'phases': False, | |
62 | 'tagsfnodescache': False, |
|
62 | 'tagsfnodescache': False, | |
63 | 'revbranchcache': False |
|
63 | 'revbranchcache': False | |
64 | }, |
|
64 | }, | |
65 | 'v2': { |
|
65 | 'v2': { | |
66 | 'changegroup': True, |
|
66 | 'changegroup': True, | |
67 | 'cg.version': '02', |
|
67 | 'cg.version': '02', | |
68 | 'obsolescence': False, |
|
68 | 'obsolescence': False, | |
69 | 'phases': False, |
|
69 | 'phases': False, | |
70 | 'tagsfnodescache': True, |
|
70 | 'tagsfnodescache': True, | |
71 | 'revbranchcache': True |
|
71 | 'revbranchcache': True | |
72 | }, |
|
72 | }, | |
73 | 'packed1' : { |
|
73 | 'packed1' : { | |
74 | 'cg.version': 's1' |
|
74 | 'cg.version': 's1' | |
75 | } |
|
75 | } | |
76 | } |
|
76 | } | |
77 | _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2'] |
|
77 | _bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2'] | |
78 |
|
78 | |||
79 | _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True, |
|
79 | _bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True, | |
80 | "tagsfnodescache": False, |
|
80 | "tagsfnodescache": False, | |
81 | "revbranchcache": False}} |
|
81 | "revbranchcache": False}} | |
82 |
|
82 | |||
83 | # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE. |
|
83 | # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE. | |
84 | _bundlespecv1compengines = {'gzip', 'bzip2', 'none'} |
|
84 | _bundlespecv1compengines = {'gzip', 'bzip2', 'none'} | |
85 |
|
85 | |||
86 | @attr.s |
|
86 | @attr.s | |
87 | class bundlespec(object): |
|
87 | class bundlespec(object): | |
88 | compression = attr.ib() |
|
88 | compression = attr.ib() | |
89 | version = attr.ib() |
|
89 | version = attr.ib() | |
90 | params = attr.ib() |
|
90 | params = attr.ib() | |
91 | contentopts = attr.ib() |
|
91 | contentopts = attr.ib() | |
92 |
|
92 | |||
93 | def parsebundlespec(repo, spec, strict=True, externalnames=False): |
|
93 | def parsebundlespec(repo, spec, strict=True, externalnames=False): | |
94 | """Parse a bundle string specification into parts. |
|
94 | """Parse a bundle string specification into parts. | |
95 |
|
95 | |||
96 | Bundle specifications denote a well-defined bundle/exchange format. |
|
96 | Bundle specifications denote a well-defined bundle/exchange format. | |
97 | The content of a given specification should not change over time in |
|
97 | The content of a given specification should not change over time in | |
98 | order to ensure that bundles produced by a newer version of Mercurial are |
|
98 | order to ensure that bundles produced by a newer version of Mercurial are | |
99 | readable by an older version. |

99 | readable by an older version. | |
100 |
|
100 | |||
101 | The string currently has the form: |
|
101 | The string currently has the form: | |
102 |
|
102 | |||
103 | <compression>-<type>[;<parameter0>[;<parameter1>]] |
|
103 | <compression>-<type>[;<parameter0>[;<parameter1>]] | |
104 |
|
104 | |||
105 | Where <compression> is one of the supported compression formats |
|
105 | Where <compression> is one of the supported compression formats | |
106 | and <type> is (currently) a version string. A ";" can follow the type and |
|
106 | and <type> is (currently) a version string. A ";" can follow the type and | |
107 | all text afterwards is interpreted as URI encoded, ";" delimited key=value |
|
107 | all text afterwards is interpreted as URI encoded, ";" delimited key=value | |
108 | pairs. |
|
108 | pairs. | |
109 |
|
109 | |||
110 | If ``strict`` is True (the default) <compression> is required. Otherwise, |
|
110 | If ``strict`` is True (the default) <compression> is required. Otherwise, | |
111 | it is optional. |
|
111 | it is optional. | |
112 |
|
112 | |||
113 | If ``externalnames`` is False (the default), the human-centric names will |
|
113 | If ``externalnames`` is False (the default), the human-centric names will | |
114 | be converted to their internal representation. |
|
114 | be converted to their internal representation. | |
115 |
|
115 | |||
116 | Returns a bundlespec object of (compression, version, parameters). |
|
116 | Returns a bundlespec object of (compression, version, parameters). | |
117 | Compression will be ``None`` if not in strict mode and a compression isn't |
|
117 | Compression will be ``None`` if not in strict mode and a compression isn't | |
118 | defined. |
|
118 | defined. | |
119 |
|
119 | |||
120 | An ``InvalidBundleSpecification`` is raised when the specification is |
|
120 | An ``InvalidBundleSpecification`` is raised when the specification is | |
121 | not syntactically well formed. |
|
121 | not syntactically well formed. | |
122 |
|
122 | |||
123 | An ``UnsupportedBundleSpecification`` is raised when the compression or |
|
123 | An ``UnsupportedBundleSpecification`` is raised when the compression or | |
124 | bundle type/version is not recognized. |
|
124 | bundle type/version is not recognized. | |
125 |
|
125 | |||
126 | Note: this function will likely eventually return a more complex data |
|
126 | Note: this function will likely eventually return a more complex data | |
127 | structure, including bundle2 part information. |
|
127 | structure, including bundle2 part information. | |
128 | """ |
|
128 | """ | |
129 | def parseparams(s): |
|
129 | def parseparams(s): | |
130 | if ';' not in s: |
|
130 | if ';' not in s: | |
131 | return s, {} |
|
131 | return s, {} | |
132 |
|
132 | |||
133 | params = {} |
|
133 | params = {} | |
134 | version, paramstr = s.split(';', 1) |
|
134 | version, paramstr = s.split(';', 1) | |
135 |
|
135 | |||
136 | for p in paramstr.split(';'): |
|
136 | for p in paramstr.split(';'): | |
137 | if '=' not in p: |
|
137 | if '=' not in p: | |
138 | raise error.InvalidBundleSpecification( |
|
138 | raise error.InvalidBundleSpecification( | |
139 | _('invalid bundle specification: ' |
|
139 | _('invalid bundle specification: ' | |
140 | 'missing "=" in parameter: %s') % p) |
|
140 | 'missing "=" in parameter: %s') % p) | |
141 |
|
141 | |||
142 | key, value = p.split('=', 1) |
|
142 | key, value = p.split('=', 1) | |
143 | key = urlreq.unquote(key) |
|
143 | key = urlreq.unquote(key) | |
144 | value = urlreq.unquote(value) |
|
144 | value = urlreq.unquote(value) | |
145 | params[key] = value |
|
145 | params[key] = value | |
146 |
|
146 | |||
147 | return version, params |
|
147 | return version, params | |
148 |
|
148 | |||
149 |
|
149 | |||
150 | if strict and '-' not in spec: |
|
150 | if strict and '-' not in spec: | |
151 | raise error.InvalidBundleSpecification( |
|
151 | raise error.InvalidBundleSpecification( | |
152 | _('invalid bundle specification; ' |
|
152 | _('invalid bundle specification; ' | |
153 | 'must be prefixed with compression: %s') % spec) |
|
153 | 'must be prefixed with compression: %s') % spec) | |
154 |
|
154 | |||
155 | if '-' in spec: |
|
155 | if '-' in spec: | |
156 | compression, version = spec.split('-', 1) |
|
156 | compression, version = spec.split('-', 1) | |
157 |
|
157 | |||
158 | if compression not in util.compengines.supportedbundlenames: |
|
158 | if compression not in util.compengines.supportedbundlenames: | |
159 | raise error.UnsupportedBundleSpecification( |
|
159 | raise error.UnsupportedBundleSpecification( | |
160 | _('%s compression is not supported') % compression) |
|
160 | _('%s compression is not supported') % compression) | |
161 |
|
161 | |||
162 | version, params = parseparams(version) |
|
162 | version, params = parseparams(version) | |
163 |
|
163 | |||
164 | if version not in _bundlespeccgversions: |
|
164 | if version not in _bundlespeccgversions: | |
165 | raise error.UnsupportedBundleSpecification( |
|
165 | raise error.UnsupportedBundleSpecification( | |
166 | _('%s is not a recognized bundle version') % version) |
|
166 | _('%s is not a recognized bundle version') % version) | |
167 | else: |
|
167 | else: | |
168 | # Value could be just the compression or just the version, in which |
|
168 | # Value could be just the compression or just the version, in which | |
169 | # case some defaults are assumed (but only when not in strict mode). |
|
169 | # case some defaults are assumed (but only when not in strict mode). | |
170 | assert not strict |
|
170 | assert not strict | |
171 |
|
171 | |||
172 | spec, params = parseparams(spec) |
|
172 | spec, params = parseparams(spec) | |
173 |
|
173 | |||
174 | if spec in util.compengines.supportedbundlenames: |
|
174 | if spec in util.compengines.supportedbundlenames: | |
175 | compression = spec |
|
175 | compression = spec | |
176 | version = 'v1' |
|
176 | version = 'v1' | |
177 | # Generaldelta repos require v2. |
|
177 | # Generaldelta repos require v2. | |
178 | if 'generaldelta' in repo.requirements: |
|
178 | if 'generaldelta' in repo.requirements: | |
179 | version = 'v2' |
|
179 | version = 'v2' | |
180 | # Modern compression engines require v2. |
|
180 | # Modern compression engines require v2. | |
181 | if compression not in _bundlespecv1compengines: |
|
181 | if compression not in _bundlespecv1compengines: | |
182 | version = 'v2' |
|
182 | version = 'v2' | |
183 | elif spec in _bundlespeccgversions: |
|
183 | elif spec in _bundlespeccgversions: | |
184 | if spec == 'packed1': |
|
184 | if spec == 'packed1': | |
185 | compression = 'none' |
|
185 | compression = 'none' | |
186 | else: |
|
186 | else: | |
187 | compression = 'bzip2' |
|
187 | compression = 'bzip2' | |
188 | version = spec |
|
188 | version = spec | |
189 | else: |
|
189 | else: | |
190 | raise error.UnsupportedBundleSpecification( |
|
190 | raise error.UnsupportedBundleSpecification( | |
191 | _('%s is not a recognized bundle specification') % spec) |
|
191 | _('%s is not a recognized bundle specification') % spec) | |
192 |
|
192 | |||
193 | # Bundle version 1 only supports a known set of compression engines. |
|
193 | # Bundle version 1 only supports a known set of compression engines. | |
194 | if version == 'v1' and compression not in _bundlespecv1compengines: |
|
194 | if version == 'v1' and compression not in _bundlespecv1compengines: | |
195 | raise error.UnsupportedBundleSpecification( |
|
195 | raise error.UnsupportedBundleSpecification( | |
196 | _('compression engine %s is not supported on v1 bundles') % |
|
196 | _('compression engine %s is not supported on v1 bundles') % | |
197 | compression) |
|
197 | compression) | |
198 |
|
198 | |||
199 | # The specification for packed1 can optionally declare the data formats |
|
199 | # The specification for packed1 can optionally declare the data formats | |
200 | # required to apply it. If we see this metadata, compare against what the |
|
200 | # required to apply it. If we see this metadata, compare against what the | |
201 | # repo supports and error if the bundle isn't compatible. |
|
201 | # repo supports and error if the bundle isn't compatible. | |
202 | if version == 'packed1' and 'requirements' in params: |
|
202 | if version == 'packed1' and 'requirements' in params: | |
203 | requirements = set(params['requirements'].split(',')) |
|
203 | requirements = set(params['requirements'].split(',')) | |
204 | missingreqs = requirements - repo.supportedformats |
|
204 | missingreqs = requirements - repo.supportedformats | |
205 | if missingreqs: |
|
205 | if missingreqs: | |
206 | raise error.UnsupportedBundleSpecification( |
|
206 | raise error.UnsupportedBundleSpecification( | |
207 | _('missing support for repository features: %s') % |
|
207 | _('missing support for repository features: %s') % | |
208 | ', '.join(sorted(missingreqs))) |
|
208 | ', '.join(sorted(missingreqs))) | |
209 |
|
209 | |||
210 | # Compute contentopts based on the version |
|
210 | # Compute contentopts based on the version | |
211 | contentopts = _bundlespeccontentopts.get(version, {}).copy() |
|
211 | contentopts = _bundlespeccontentopts.get(version, {}).copy() | |
212 |
|
212 | |||
213 | # Process the variants |
|
213 | # Process the variants | |
214 | if "stream" in params and params["stream"] == "v2": |
|
214 | if "stream" in params and params["stream"] == "v2": | |
215 | variant = _bundlespecvariants["streamv2"] |
|
215 | variant = _bundlespecvariants["streamv2"] | |
216 | contentopts.update(variant) |
|
216 | contentopts.update(variant) | |
217 |
|
217 | |||
218 | if not externalnames: |
|
218 | if not externalnames: | |
219 | engine = util.compengines.forbundlename(compression) |
|
219 | engine = util.compengines.forbundlename(compression) | |
220 | compression = engine.bundletype()[1] |
|
220 | compression = engine.bundletype()[1] | |
221 | version = _bundlespeccgversions[version] |
|
221 | version = _bundlespeccgversions[version] | |
222 |
|
222 | |||
223 | return bundlespec(compression, version, params, contentopts) |
|
223 | return bundlespec(compression, version, params, contentopts) | |
224 |
|
224 | |||
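
A hedged sketch of how the rules above combine (return values illustrative; the internal names come from the compression engine's bundletype() and the _bundlespeccgversions table):

    # strict (default): the compression prefix is mandatory
    parsebundlespec(repo, 'gzip-v2;obsolescence=true')
    # -> bundlespec(compression='GZ', version='02',
    #               params={'obsolescence': 'true'},
    #               contentopts=<copy of the 'v2' entry>)

    # non-strict: a bare compression or version name gets defaults
    parsebundlespec(repo, 'packed1', strict=False)  # compression 'none'
    parsebundlespec(repo, 'zstd', strict=False)     # forced to v2, if the
                                                    # zstd engine is present
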
225 | def readbundle(ui, fh, fname, vfs=None): |
|
225 | def readbundle(ui, fh, fname, vfs=None): | |
226 | header = changegroup.readexactly(fh, 4) |
|
226 | header = changegroup.readexactly(fh, 4) | |
227 |
|
227 | |||
228 | alg = None |
|
228 | alg = None | |
229 | if not fname: |
|
229 | if not fname: | |
230 | fname = "stream" |
|
230 | fname = "stream" | |
231 | if not header.startswith('HG') and header.startswith('\0'): |
|
231 | if not header.startswith('HG') and header.startswith('\0'): | |
232 | fh = changegroup.headerlessfixup(fh, header) |
|
232 | fh = changegroup.headerlessfixup(fh, header) | |
233 | header = "HG10" |
|
233 | header = "HG10" | |
234 | alg = 'UN' |
|
234 | alg = 'UN' | |
235 | elif vfs: |
|
235 | elif vfs: | |
236 | fname = vfs.join(fname) |
|
236 | fname = vfs.join(fname) | |
237 |
|
237 | |||
238 | magic, version = header[0:2], header[2:4] |
|
238 | magic, version = header[0:2], header[2:4] | |
239 |
|
239 | |||
240 | if magic != 'HG': |
|
240 | if magic != 'HG': | |
241 | raise error.Abort(_('%s: not a Mercurial bundle') % fname) |
|
241 | raise error.Abort(_('%s: not a Mercurial bundle') % fname) | |
242 | if version == '10': |
|
242 | if version == '10': | |
243 | if alg is None: |
|
243 | if alg is None: | |
244 | alg = changegroup.readexactly(fh, 2) |
|
244 | alg = changegroup.readexactly(fh, 2) | |
245 | return changegroup.cg1unpacker(fh, alg) |
|
245 | return changegroup.cg1unpacker(fh, alg) | |
246 | elif version.startswith('2'): |
|
246 | elif version.startswith('2'): | |
247 | return bundle2.getunbundler(ui, fh, magicstring=magic + version) |
|
247 | return bundle2.getunbundler(ui, fh, magicstring=magic + version) | |
248 | elif version == 'S1': |
|
248 | elif version == 'S1': | |
249 | return streamclone.streamcloneapplier(fh) |
|
249 | return streamclone.streamcloneapplier(fh) | |
250 | else: |
|
250 | else: | |
251 | raise error.Abort(_('%s: unknown bundle version %s') % (fname, version)) |
|
251 | raise error.Abort(_('%s: unknown bundle version %s') % (fname, version)) | |
252 |
|
252 | |||
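
readbundle() dispatches purely on the four-byte magic; a rough usage sketch (file name hypothetical):

    with open('incoming.hg', 'rb') as fh:
        unbundler = readbundle(ui, fh, 'incoming.hg')
    # 'HG10' + 2-byte compression -> changegroup.cg1unpacker
    # 'HG2x'                      -> bundle2.getunbundler
    # 'HGS1'                      -> streamclone.streamcloneapplier
    # a headerless stream starting with '\0' is fixed up as HG10/'UN'
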
253 | def getbundlespec(ui, fh): |
|
253 | def getbundlespec(ui, fh): | |
254 | """Infer the bundlespec from a bundle file handle. |
|
254 | """Infer the bundlespec from a bundle file handle. | |
255 |
|
255 | |||
256 | This function seeks the input file handle; the original seek position is not |

256 | This function seeks the input file handle; the original seek position is not | |
257 | restored. |
|
257 | restored. | |
258 | """ |
|
258 | """ | |
259 | def speccompression(alg): |
|
259 | def speccompression(alg): | |
260 | try: |
|
260 | try: | |
261 | return util.compengines.forbundletype(alg).bundletype()[0] |
|
261 | return util.compengines.forbundletype(alg).bundletype()[0] | |
262 | except KeyError: |
|
262 | except KeyError: | |
263 | return None |
|
263 | return None | |
264 |
|
264 | |||
265 | b = readbundle(ui, fh, None) |
|
265 | b = readbundle(ui, fh, None) | |
266 | if isinstance(b, changegroup.cg1unpacker): |
|
266 | if isinstance(b, changegroup.cg1unpacker): | |
267 | alg = b._type |
|
267 | alg = b._type | |
268 | if alg == '_truncatedBZ': |
|
268 | if alg == '_truncatedBZ': | |
269 | alg = 'BZ' |
|
269 | alg = 'BZ' | |
270 | comp = speccompression(alg) |
|
270 | comp = speccompression(alg) | |
271 | if not comp: |
|
271 | if not comp: | |
272 | raise error.Abort(_('unknown compression algorithm: %s') % alg) |
|
272 | raise error.Abort(_('unknown compression algorithm: %s') % alg) | |
273 | return '%s-v1' % comp |
|
273 | return '%s-v1' % comp | |
274 | elif isinstance(b, bundle2.unbundle20): |
|
274 | elif isinstance(b, bundle2.unbundle20): | |
275 | if 'Compression' in b.params: |
|
275 | if 'Compression' in b.params: | |
276 | comp = speccompression(b.params['Compression']) |
|
276 | comp = speccompression(b.params['Compression']) | |
277 | if not comp: |
|
277 | if not comp: | |
278 | raise error.Abort(_('unknown compression algorithm: %s') % comp) |
|
278 | raise error.Abort(_('unknown compression algorithm: %s') % comp) | |
279 | else: |
|
279 | else: | |
280 | comp = 'none' |
|
280 | comp = 'none' | |
281 |
|
281 | |||
282 | version = None |
|
282 | version = None | |
283 | for part in b.iterparts(): |
|
283 | for part in b.iterparts(): | |
284 | if part.type == 'changegroup': |
|
284 | if part.type == 'changegroup': | |
285 | version = part.params['version'] |
|
285 | version = part.params['version'] | |
286 | if version in ('01', '02'): |
|
286 | if version in ('01', '02'): | |
287 | version = 'v2' |
|
287 | version = 'v2' | |
288 | else: |
|
288 | else: | |
289 | raise error.Abort(_('changegroup version %s does not have ' |
|
289 | raise error.Abort(_('changegroup version %s does not have ' | |
290 | 'a known bundlespec') % version, |
|
290 | 'a known bundlespec') % version, | |
291 | hint=_('try upgrading your Mercurial ' |
|
291 | hint=_('try upgrading your Mercurial ' | |
292 | 'client')) |
|
292 | 'client')) | |
293 | elif part.type == 'stream2' and version is None: |
|
293 | elif part.type == 'stream2' and version is None: | |
294 | # A stream2 part must be part of a v2 bundle |

294 | # A stream2 part must be part of a v2 bundle | |
295 | version = "v2" |
|
295 | version = "v2" | |
296 | requirements = urlreq.unquote(part.params['requirements']) |
|
296 | requirements = urlreq.unquote(part.params['requirements']) | |
297 | splitted = requirements.split() |
|
297 | splitted = requirements.split() | |
298 | params = bundle2._formatrequirementsparams(splitted) |
|
298 | params = bundle2._formatrequirementsparams(splitted) | |
299 | return 'none-v2;stream=v2;%s' % params |
|
299 | return 'none-v2;stream=v2;%s' % params | |
300 |
|
300 | |||
301 | if not version: |
|
301 | if not version: | |
302 | raise error.Abort(_('could not identify changegroup version in ' |
|
302 | raise error.Abort(_('could not identify changegroup version in ' | |
303 | 'bundle')) |
|
303 | 'bundle')) | |
304 |
|
304 | |||
305 | return '%s-%s' % (comp, version) |
|
305 | return '%s-%s' % (comp, version) | |
306 | elif isinstance(b, streamclone.streamcloneapplier): |
|
306 | elif isinstance(b, streamclone.streamcloneapplier): | |
307 | requirements = streamclone.readbundle1header(fh)[2] |
|
307 | requirements = streamclone.readbundle1header(fh)[2] | |
308 | formatted = bundle2._formatrequirementsparams(requirements) |
|
308 | formatted = bundle2._formatrequirementsparams(requirements) | |
309 | return 'none-packed1;%s' % formatted |
|
309 | return 'none-packed1;%s' % formatted | |
310 | else: |
|
310 | else: | |
311 | raise error.Abort(_('unknown bundle type: %s') % b) |
|
311 | raise error.Abort(_('unknown bundle type: %s') % b) | |
312 |
|
312 | |||
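
And the inverse direction, inferring a spec string from a file on disk (sketch; the exact result depends on the bundle's compression and parts):

    with open('backup.hg', 'rb') as fh:
        spec = getbundlespec(ui, fh)
    # e.g. 'bzip2-v1' for an HG10BZ file, 'zstd-v2' for a zstd-compressed
    # bundle2 file, or 'none-v2;stream=v2;...' for a stream2 bundle
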
313 | def _computeoutgoing(repo, heads, common): |
|
313 | def _computeoutgoing(repo, heads, common): | |
314 | """Computes which revs are outgoing given a set of common |
|
314 | """Computes which revs are outgoing given a set of common | |
315 | and a set of heads. |
|
315 | and a set of heads. | |
316 |
|
316 | |||
317 | This is a separate function so extensions can have access to |
|
317 | This is a separate function so extensions can have access to | |
318 | the logic. |
|
318 | the logic. | |
319 |
|
319 | |||
320 | Returns a discovery.outgoing object. |
|
320 | Returns a discovery.outgoing object. | |
321 | """ |
|
321 | """ | |
322 | cl = repo.changelog |
|
322 | cl = repo.changelog | |
323 | if common: |
|
323 | if common: | |
324 | hasnode = cl.hasnode |
|
324 | hasnode = cl.hasnode | |
325 | common = [n for n in common if hasnode(n)] |
|
325 | common = [n for n in common if hasnode(n)] | |
326 | else: |
|
326 | else: | |
327 | common = [nullid] |
|
327 | common = [nullid] | |
328 | if not heads: |
|
328 | if not heads: | |
329 | heads = cl.heads() |
|
329 | heads = cl.heads() | |
330 | return discovery.outgoing(repo, common, heads) |
|
330 | return discovery.outgoing(repo, common, heads) | |
331 |
|
331 | |||
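
For example (sketch; somehead stands for any changelog node): an empty common set falls back to the null revision, so everything reachable from the requested heads is treated as outgoing:

    out = _computeoutgoing(repo, heads=[somehead], common=[])
    # common becomes [nullid]; out.missing then covers all of ::somehead
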
332 | def _forcebundle1(op): |
|
332 | def _forcebundle1(op): | |
333 | """return true if a pull/push must use bundle1 |
|
333 | """return true if a pull/push must use bundle1 | |
334 |
|
334 | |||
335 | This function is used to allow testing of the older bundle version""" |
|
335 | This function is used to allow testing of the older bundle version""" | |
336 | ui = op.repo.ui |
|
336 | ui = op.repo.ui | |
337 | # The goal of this config is to allow developers to choose the bundle |

337 | # The goal of this config is to allow developers to choose the bundle | |
338 | # version used during exchange. This is especially handy during tests. |

338 | # version used during exchange. This is especially handy during tests. | |
339 | # Value is a list of bundle versions to pick from; the highest version |

339 | # Value is a list of bundle versions to pick from; the highest version | |
340 | # should be used. |

340 | # should be used. | |
341 | # |
|
341 | # | |
342 | # developer config: devel.legacy.exchange |
|
342 | # developer config: devel.legacy.exchange | |
343 | exchange = ui.configlist('devel', 'legacy.exchange') |
|
343 | exchange = ui.configlist('devel', 'legacy.exchange') | |
344 | forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange |
|
344 | forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange | |
345 | return forcebundle1 or not op.remote.capable('bundle2') |
|
345 | return forcebundle1 or not op.remote.capable('bundle2') | |
346 |
|
346 | |||
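
In a test, the legacy path can be pinned with configuration such as (sketch):

    [devel]
    legacy.exchange = bundle1

Note the guard above: listing 'bundle1 bundle2' turns the override back off, since bundle1 is only forced when 'bundle2' is absent from the list.
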
347 | class pushoperation(object): |
|
347 | class pushoperation(object): | |
348 | """A object that represent a single push operation |
|
348 | """A object that represent a single push operation | |
349 |
|
349 | |||
350 | Its purpose is to carry push related state and very common operations. |
|
350 | Its purpose is to carry push related state and very common operations. | |
351 |
|
351 | |||
352 | A new pushoperation should be created at the beginning of each push and |
|
352 | A new pushoperation should be created at the beginning of each push and | |
353 | discarded afterward. |
|
353 | discarded afterward. | |
354 | """ |
|
354 | """ | |
355 |
|
355 | |||
356 | def __init__(self, repo, remote, force=False, revs=None, newbranch=False, |
|
356 | def __init__(self, repo, remote, force=False, revs=None, newbranch=False, | |
357 | bookmarks=(), pushvars=None): |
|
357 | bookmarks=(), pushvars=None): | |
358 | # repo we push from |
|
358 | # repo we push from | |
359 | self.repo = repo |
|
359 | self.repo = repo | |
360 | self.ui = repo.ui |
|
360 | self.ui = repo.ui | |
361 | # repo we push to |
|
361 | # repo we push to | |
362 | self.remote = remote |
|
362 | self.remote = remote | |
363 | # force option provided |
|
363 | # force option provided | |
364 | self.force = force |
|
364 | self.force = force | |
365 | # revs to be pushed (None is "all") |
|
365 | # revs to be pushed (None is "all") | |
366 | self.revs = revs |
|
366 | self.revs = revs | |
367 | # bookmark explicitly pushed |
|
367 | # bookmark explicitly pushed | |
368 | self.bookmarks = bookmarks |
|
368 | self.bookmarks = bookmarks | |
369 | # allow push of new branch |
|
369 | # allow push of new branch | |
370 | self.newbranch = newbranch |
|
370 | self.newbranch = newbranch | |
371 | # steps already performed |

371 | # steps already performed | |
372 | # (used to check what steps have already been performed through bundle2) |

372 | # (used to check what steps have already been performed through bundle2) | |
373 | self.stepsdone = set() |
|
373 | self.stepsdone = set() | |
374 | # Integer version of the changegroup push result |
|
374 | # Integer version of the changegroup push result | |
375 | # - None means nothing to push |
|
375 | # - None means nothing to push | |
376 | # - 0 means HTTP error |
|
376 | # - 0 means HTTP error | |
377 | # - 1 means we pushed and remote head count is unchanged *or* |
|
377 | # - 1 means we pushed and remote head count is unchanged *or* | |
378 | # we have outgoing changesets but refused to push |
|
378 | # we have outgoing changesets but refused to push | |
379 | # - other values as described by addchangegroup() |
|
379 | # - other values as described by addchangegroup() | |
380 | self.cgresult = None |
|
380 | self.cgresult = None | |
381 | # Boolean value for the bookmark push |
|
381 | # Boolean value for the bookmark push | |
382 | self.bkresult = None |
|
382 | self.bkresult = None | |
383 | # discovery.outgoing object (contains common and outgoing data) |

383 | # discovery.outgoing object (contains common and outgoing data) | |
384 | self.outgoing = None |
|
384 | self.outgoing = None | |
385 | # all remote topological heads before the push |
|
385 | # all remote topological heads before the push | |
386 | self.remoteheads = None |
|
386 | self.remoteheads = None | |
387 | # Details of the remote branch pre and post push |
|
387 | # Details of the remote branch pre and post push | |
388 | # |
|
388 | # | |
389 | # mapping: {'branch': ([remoteheads], |
|
389 | # mapping: {'branch': ([remoteheads], | |
390 | # [newheads], |
|
390 | # [newheads], | |
391 | # [unsyncedheads], |
|
391 | # [unsyncedheads], | |
392 | # [discardedheads])} |
|
392 | # [discardedheads])} | |
393 | # - branch: the branch name |
|
393 | # - branch: the branch name | |
394 | # - remoteheads: the list of remote heads known locally |
|
394 | # - remoteheads: the list of remote heads known locally | |
395 | # None if the branch is new |
|
395 | # None if the branch is new | |
396 | # - newheads: the new remote heads (known locally) with outgoing pushed |
|
396 | # - newheads: the new remote heads (known locally) with outgoing pushed | |
397 | # - unsyncedheads: the list of remote heads unknown locally. |
|
397 | # - unsyncedheads: the list of remote heads unknown locally. | |
398 | # - discardedheads: the list of remote heads made obsolete by the push |
|
398 | # - discardedheads: the list of remote heads made obsolete by the push | |
399 | self.pushbranchmap = None |
|
399 | self.pushbranchmap = None | |
400 | # testable as a boolean indicating if any nodes are missing locally. |
|
400 | # testable as a boolean indicating if any nodes are missing locally. | |
401 | self.incoming = None |
|
401 | self.incoming = None | |
402 | # summary of the remote phase situation |
|
402 | # summary of the remote phase situation | |
403 | self.remotephases = None |
|
403 | self.remotephases = None | |
404 | # phase changes that must be pushed alongside the changesets |

404 | # phase changes that must be pushed alongside the changesets | |
405 | self.outdatedphases = None |

405 | self.outdatedphases = None | |
406 | # phase changes that must be pushed if changeset push fails |

406 | # phase changes that must be pushed if changeset push fails | |
407 | self.fallbackoutdatedphases = None |
|
407 | self.fallbackoutdatedphases = None | |
408 | # outgoing obsmarkers |
|
408 | # outgoing obsmarkers | |
409 | self.outobsmarkers = set() |
|
409 | self.outobsmarkers = set() | |
410 | # outgoing bookmarks |
|
410 | # outgoing bookmarks | |
411 | self.outbookmarks = [] |
|
411 | self.outbookmarks = [] | |
412 | # transaction manager |
|
412 | # transaction manager | |
413 | self.trmanager = None |
|
413 | self.trmanager = None | |
414 | # map { pushkey partid -> callback handling failure} |
|
414 | # map { pushkey partid -> callback handling failure} | |
415 | # used to handle exception from mandatory pushkey part failure |
|
415 | # used to handle exception from mandatory pushkey part failure | |
416 | self.pkfailcb = {} |
|
416 | self.pkfailcb = {} | |
417 | # an iterable of pushvars or None |
|
417 | # an iterable of pushvars or None | |
418 | self.pushvars = pushvars |
|
418 | self.pushvars = pushvars | |
419 |
|
419 | |||
420 | @util.propertycache |
|
420 | @util.propertycache | |
421 | def futureheads(self): |
|
421 | def futureheads(self): | |
422 | """future remote heads if the changeset push succeeds""" |
|
422 | """future remote heads if the changeset push succeeds""" | |
423 | return self.outgoing.missingheads |
|
423 | return self.outgoing.missingheads | |
424 |
|
424 | |||
425 | @util.propertycache |
|
425 | @util.propertycache | |
426 | def fallbackheads(self): |
|
426 | def fallbackheads(self): | |
427 | """future remote heads if the changeset push fails""" |
|
427 | """future remote heads if the changeset push fails""" | |
428 | if self.revs is None: |
|
428 | if self.revs is None: | |
429 | # no target to push, all common are relevant |

429 | # no target to push, all common are relevant | |
430 | return self.outgoing.commonheads |
|
430 | return self.outgoing.commonheads | |
431 | unfi = self.repo.unfiltered() |
|
431 | unfi = self.repo.unfiltered() | |
432 | # I want cheads = heads(::missingheads and ::commonheads) |
|
432 | # I want cheads = heads(::missingheads and ::commonheads) | |
433 | # (missingheads is revs with secret changeset filtered out) |
|
433 | # (missingheads is revs with secret changeset filtered out) | |
434 | # |
|
434 | # | |
435 | # This can be expressed as: |
|
435 | # This can be expressed as: | |
436 | # cheads = ( (missingheads and ::commonheads) |
|
436 | # cheads = ( (missingheads and ::commonheads) | |
437 | # + (commonheads and ::missingheads) |

437 | # + (commonheads and ::missingheads) | |
438 | # ) |
|
438 | # ) | |
439 | # |
|
439 | # | |
440 | # while trying to push we already computed the following: |
|
440 | # while trying to push we already computed the following: | |
441 | # common = (::commonheads) |
|
441 | # common = (::commonheads) | |
442 | # missing = ((commonheads::missingheads) - commonheads) |
|
442 | # missing = ((commonheads::missingheads) - commonheads) | |
443 | # |
|
443 | # | |
444 | # We can pick: |
|
444 | # We can pick: | |
445 | # * missingheads part of common (::commonheads) |
|
445 | # * missingheads part of common (::commonheads) | |
446 | common = self.outgoing.common |
|
446 | common = self.outgoing.common | |
447 | nm = self.repo.changelog.nodemap |
|
447 | nm = self.repo.changelog.nodemap | |
448 | cheads = [node for node in self.revs if nm[node] in common] |
|
448 | cheads = [node for node in self.revs if nm[node] in common] | |
449 | # and |
|
449 | # and | |
450 | # * commonheads parents on missing |
|
450 | # * commonheads parents on missing | |
451 | revset = unfi.set('%ln and parents(roots(%ln))', |
|
451 | revset = unfi.set('%ln and parents(roots(%ln))', | |
452 | self.outgoing.commonheads, |
|
452 | self.outgoing.commonheads, | |
453 | self.outgoing.missing) |
|
453 | self.outgoing.missing) | |
454 | cheads.extend(c.node() for c in revset) |
|
454 | cheads.extend(c.node() for c in revset) | |
455 | return cheads |
|
455 | return cheads | |
456 |
|
456 | |||
457 | @property |
|
457 | @property | |
458 | def commonheads(self): |
|
458 | def commonheads(self): | |
459 | """set of all common heads after changeset bundle push""" |
|
459 | """set of all common heads after changeset bundle push""" | |
460 | if self.cgresult: |
|
460 | if self.cgresult: | |
461 | return self.futureheads |
|
461 | return self.futureheads | |
462 | else: |
|
462 | else: | |
463 | return self.fallbackheads |
|
463 | return self.fallbackheads | |
464 |
|
464 | |||
465 | # mapping of message used when pushing bookmark |
|
465 | # mapping of message used when pushing bookmark | |
466 | bookmsgmap = {'update': (_("updating bookmark %s\n"), |
|
466 | bookmsgmap = {'update': (_("updating bookmark %s\n"), | |
467 | _('updating bookmark %s failed!\n')), |
|
467 | _('updating bookmark %s failed!\n')), | |
468 | 'export': (_("exporting bookmark %s\n"), |
|
468 | 'export': (_("exporting bookmark %s\n"), | |
469 | _('exporting bookmark %s failed!\n')), |
|
469 | _('exporting bookmark %s failed!\n')), | |
470 | 'delete': (_("deleting remote bookmark %s\n"), |
|
470 | 'delete': (_("deleting remote bookmark %s\n"), | |
471 | _('deleting remote bookmark %s failed!\n')), |
|
471 | _('deleting remote bookmark %s failed!\n')), | |
472 | } |
|
472 | } | |
473 |
|
473 | |||
474 |
|
474 | |||
475 | def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(), |
|
475 | def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(), | |
476 | opargs=None): |
|
476 | opargs=None): | |
477 | '''Push outgoing changesets (limited by revs) from a local |
|
477 | '''Push outgoing changesets (limited by revs) from a local | |
478 | repository to remote. Return an integer: |
|
478 | repository to remote. Return an integer: | |
479 | - None means nothing to push |
|
479 | - None means nothing to push | |
480 | - 0 means HTTP error |
|
480 | - 0 means HTTP error | |
481 | - 1 means we pushed and remote head count is unchanged *or* |
|
481 | - 1 means we pushed and remote head count is unchanged *or* | |
482 | we have outgoing changesets but refused to push |
|
482 | we have outgoing changesets but refused to push | |
483 | - other values as described by addchangegroup() |
|
483 | - other values as described by addchangegroup() | |
484 | ''' |
|
484 | ''' | |
485 | if opargs is None: |
|
485 | if opargs is None: | |
486 | opargs = {} |
|
486 | opargs = {} | |
487 | pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks, |
|
487 | pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks, | |
488 | **pycompat.strkwargs(opargs)) |
|
488 | **pycompat.strkwargs(opargs)) | |
489 | if pushop.remote.local(): |
|
489 | if pushop.remote.local(): | |
490 | missing = (set(pushop.repo.requirements) |
|
490 | missing = (set(pushop.repo.requirements) | |
491 | - pushop.remote.local().supported) |
|
491 | - pushop.remote.local().supported) | |
492 | if missing: |
|
492 | if missing: | |
493 | msg = _("required features are not" |
|
493 | msg = _("required features are not" | |
494 | " supported in the destination:" |
|
494 | " supported in the destination:" | |
495 | " %s") % (', '.join(sorted(missing))) |
|
495 | " %s") % (', '.join(sorted(missing))) | |
496 | raise error.Abort(msg) |
|
496 | raise error.Abort(msg) | |
497 |
|
497 | |||
498 | if not pushop.remote.canpush(): |
|
498 | if not pushop.remote.canpush(): | |
499 | raise error.Abort(_("destination does not support push")) |
|
499 | raise error.Abort(_("destination does not support push")) | |
500 |
|
500 | |||
501 | if not pushop.remote.capable('unbundle'): |
|
501 | if not pushop.remote.capable('unbundle'): | |
502 | raise error.Abort(_('cannot push: destination does not support the ' |
|
502 | raise error.Abort(_('cannot push: destination does not support the ' | |
503 | 'unbundle wire protocol command')) |
|
503 | 'unbundle wire protocol command')) | |
504 |
|
504 | |||
505 | # get lock as we might write phase data |
|
505 | # get lock as we might write phase data | |
506 | wlock = lock = None |
|
506 | wlock = lock = None | |
507 | try: |
|
507 | try: | |
508 | # bundle2 push may receive a reply bundle touching bookmarks or other |
|
508 | # bundle2 push may receive a reply bundle touching bookmarks or other | |
509 | # things requiring the wlock. Take it now to ensure proper ordering. |
|
509 | # things requiring the wlock. Take it now to ensure proper ordering. | |
510 | maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback') |
|
510 | maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback') | |
511 | if (not _forcebundle1(pushop)) and maypushback: |
|
511 | if (not _forcebundle1(pushop)) and maypushback: | |
512 | wlock = pushop.repo.wlock() |
|
512 | wlock = pushop.repo.wlock() | |
513 | lock = pushop.repo.lock() |
|
513 | lock = pushop.repo.lock() | |
514 | pushop.trmanager = transactionmanager(pushop.repo, |
|
514 | pushop.trmanager = transactionmanager(pushop.repo, | |
515 | 'push-response', |
|
515 | 'push-response', | |
516 | pushop.remote.url()) |
|
516 | pushop.remote.url()) | |
517 | except IOError as err: |
|
517 | except IOError as err: | |
518 | if err.errno != errno.EACCES: |
|
518 | if err.errno != errno.EACCES: | |
519 | raise |
|
519 | raise | |
520 | # source repo cannot be locked. |
|
520 | # source repo cannot be locked. | |
521 | # We do not abort the push, but just disable the local phase |
|
521 | # We do not abort the push, but just disable the local phase | |
522 | # synchronisation. |
|
522 | # synchronisation. | |
523 | msg = 'cannot lock source repository: %s\n' % err |
|
523 | msg = 'cannot lock source repository: %s\n' % err | |
524 | pushop.ui.debug(msg) |
|
524 | pushop.ui.debug(msg) | |
525 |
|
525 | |||
526 | with wlock or util.nullcontextmanager(), \ |
|
526 | with wlock or util.nullcontextmanager(), \ | |
527 | lock or util.nullcontextmanager(), \ |
|
527 | lock or util.nullcontextmanager(), \ | |
528 | pushop.trmanager or util.nullcontextmanager(): |
|
528 | pushop.trmanager or util.nullcontextmanager(): | |
529 | pushop.repo.checkpush(pushop) |
|
529 | pushop.repo.checkpush(pushop) | |
530 | _pushdiscovery(pushop) |
|
530 | _pushdiscovery(pushop) | |
531 | if not _forcebundle1(pushop): |
|
531 | if not _forcebundle1(pushop): | |
532 | _pushbundle2(pushop) |
|
532 | _pushbundle2(pushop) | |
533 | _pushchangeset(pushop) |
|
533 | _pushchangeset(pushop) | |
534 | _pushsyncphase(pushop) |
|
534 | _pushsyncphase(pushop) | |
535 | _pushobsolete(pushop) |
|
535 | _pushobsolete(pushop) | |
536 | _pushbookmark(pushop) |
|
536 | _pushbookmark(pushop) | |
537 |
|
537 | |||
538 | return pushop |
|
538 | return pushop | |
539 |
|
539 | |||
540 | # list of steps to perform discovery before push |
|
540 | # list of steps to perform discovery before push | |
541 | pushdiscoveryorder = [] |
|
541 | pushdiscoveryorder = [] | |
542 |
|
542 | |||
543 | # Mapping between step name and function |
|
543 | # Mapping between step name and function | |
544 | # |
|
544 | # | |
545 | # This exists to help extensions wrap steps if necessary |
|
545 | # This exists to help extensions wrap steps if necessary | |
546 | pushdiscoverymapping = {} |
|
546 | pushdiscoverymapping = {} | |
547 |
|
547 | |||
548 | def pushdiscovery(stepname): |
|
548 | def pushdiscovery(stepname): | |
549 | """decorator for function performing discovery before push |
|
549 | """decorator for function performing discovery before push | |
550 |
|
550 | |||
551 | The function is added to the step -> function mapping and appended to the |
|
551 | The function is added to the step -> function mapping and appended to the | |
552 | list of steps. Beware that decorated functions will be added in order (this |
|
552 | list of steps. Beware that decorated functions will be added in order (this | |
553 | may matter). |
|
553 | may matter). | |
554 |
|
554 | |||
555 | You can only use this decorator for a new step; if you want to wrap a step |
|
555 | You can only use this decorator for a new step; if you want to wrap a step | |
556 | from an extension, change the pushdiscoverymapping dictionary directly.""" |
|
556 | from an extension, change the pushdiscoverymapping dictionary directly.""" | |
557 | def dec(func): |
|
557 | def dec(func): | |
558 | assert stepname not in pushdiscoverymapping |
|
558 | assert stepname not in pushdiscoverymapping | |
559 | pushdiscoverymapping[stepname] = func |
|
559 | pushdiscoverymapping[stepname] = func | |
560 | pushdiscoveryorder.append(stepname) |
|
560 | pushdiscoveryorder.append(stepname) | |
561 | return func |
|
561 | return func | |
562 | return dec |
|
562 | return dec | |
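# Sketch (illustration only): how an extension would use the decorator above
# to register a new discovery step, and how it would wrap an existing step
# through the mapping, as the docstring recommends. The names 'mystep' and
# '_mydiscovery' are hypothetical.
#
#     @pushdiscovery('mystep')
#     def _mydiscovery(pushop):
#         pushop.ui.debug('running mystep discovery\n')
#
#     origstep = pushdiscoverymapping['changeset']
#     def wrappedstep(pushop):
#         pushop.ui.debug('before changeset discovery\n')
#         return origstep(pushop)
#     pushdiscoverymapping['changeset'] = wrappedstep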
563 |
|
563 | |||
564 | def _pushdiscovery(pushop): |
|
564 | def _pushdiscovery(pushop): | |
565 | """Run all discovery steps""" |
|
565 | """Run all discovery steps""" | |
566 | for stepname in pushdiscoveryorder: |
|
566 | for stepname in pushdiscoveryorder: | |
567 | step = pushdiscoverymapping[stepname] |
|
567 | step = pushdiscoverymapping[stepname] | |
568 | step(pushop) |
|
568 | step(pushop) | |
569 |
|
569 | |||
570 | @pushdiscovery('changeset') |
|
570 | @pushdiscovery('changeset') | |
571 | def _pushdiscoverychangeset(pushop): |
|
571 | def _pushdiscoverychangeset(pushop): | |
572 | """discover the changeset that need to be pushed""" |
|
572 | """discover the changeset that need to be pushed""" | |
573 | fci = discovery.findcommonincoming |
|
573 | fci = discovery.findcommonincoming | |
574 | if pushop.revs: |
|
574 | if pushop.revs: | |
575 | commoninc = fci(pushop.repo, pushop.remote, force=pushop.force, |
|
575 | commoninc = fci(pushop.repo, pushop.remote, force=pushop.force, | |
576 | ancestorsof=pushop.revs) |
|
576 | ancestorsof=pushop.revs) | |
577 | else: |
|
577 | else: | |
578 | commoninc = fci(pushop.repo, pushop.remote, force=pushop.force) |
|
578 | commoninc = fci(pushop.repo, pushop.remote, force=pushop.force) | |
579 | common, inc, remoteheads = commoninc |
|
579 | common, inc, remoteheads = commoninc | |
580 | fco = discovery.findcommonoutgoing |
|
580 | fco = discovery.findcommonoutgoing | |
581 | outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs, |
|
581 | outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs, | |
582 | commoninc=commoninc, force=pushop.force) |
|
582 | commoninc=commoninc, force=pushop.force) | |
583 | pushop.outgoing = outgoing |
|
583 | pushop.outgoing = outgoing | |
584 | pushop.remoteheads = remoteheads |
|
584 | pushop.remoteheads = remoteheads | |
585 | pushop.incoming = inc |
|
585 | pushop.incoming = inc | |
586 |
|
586 | |||
587 | @pushdiscovery('phase') |
|
587 | @pushdiscovery('phase') | |
588 | def _pushdiscoveryphase(pushop): |
|
588 | def _pushdiscoveryphase(pushop): | |
589 | """discover the phase that needs to be pushed |
|
589 | """discover the phase that needs to be pushed | |
590 |
|
590 | |||
591 | (computed for both success and failure case for changesets push)""" |
|
591 | (computed for both success and failure case for changesets push)""" | |
592 | outgoing = pushop.outgoing |
|
592 | outgoing = pushop.outgoing | |
593 | unfi = pushop.repo.unfiltered() |
|
593 | unfi = pushop.repo.unfiltered() | |
594 | remotephases = pushop.remote.listkeys('phases') |
|
594 | remotephases = pushop.remote.listkeys('phases') | |
595 | if (pushop.ui.configbool('ui', '_usedassubrepo') |
|
595 | if (pushop.ui.configbool('ui', '_usedassubrepo') | |
596 | and remotephases # server supports phases |
|
596 | and remotephases # server supports phases | |
597 | and not pushop.outgoing.missing # no changesets to be pushed |
|
597 | and not pushop.outgoing.missing # no changesets to be pushed | |
598 | and remotephases.get('publishing', False)): |
|
598 | and remotephases.get('publishing', False)): | |
599 | # When: |
|
599 | # When: | |
600 | # - this is a subrepo push |
|
600 | # - this is a subrepo push | |
601 | # - and the remote supports phases |
|
601 | # - and the remote supports phases | |
602 | # - and no changesets are to be pushed |
|
602 | # - and no changesets are to be pushed | |
603 | # - and remote is publishing |
|
603 | # - and remote is publishing | |
604 | # We may be in the issue 3781 case! |
|
604 | # We may be in the issue 3781 case! | |
605 | # We drop the courtesy phase synchronisation that |
|
605 | # We drop the courtesy phase synchronisation that | |
606 | # would publish changesets which are possibly still |
|
606 | # would publish changesets which are possibly still | |
607 | # draft locally on the remote. |
|
607 | # draft locally on the remote. | |
608 | pushop.outdatedphases = [] |
|
608 | pushop.outdatedphases = [] | |
609 | pushop.fallbackoutdatedphases = [] |
|
609 | pushop.fallbackoutdatedphases = [] | |
610 | return |
|
610 | return | |
611 |
|
611 | |||
612 | pushop.remotephases = phases.remotephasessummary(pushop.repo, |
|
612 | pushop.remotephases = phases.remotephasessummary(pushop.repo, | |
613 | pushop.fallbackheads, |
|
613 | pushop.fallbackheads, | |
614 | remotephases) |
|
614 | remotephases) | |
615 | droots = pushop.remotephases.draftroots |
|
615 | droots = pushop.remotephases.draftroots | |
616 |
|
616 | |||
617 | extracond = '' |
|
617 | extracond = '' | |
618 | if not pushop.remotephases.publishing: |
|
618 | if not pushop.remotephases.publishing: | |
619 | extracond = ' and public()' |
|
619 | extracond = ' and public()' | |
620 | revset = 'heads((%%ln::%%ln) %s)' % extracond |
|
620 | revset = 'heads((%%ln::%%ln) %s)' % extracond | |
621 | # Get the list of all revs that are draft on the remote but public here. |
|
621 | # Get the list of all revs that are draft on the remote but public here. | |
622 | # XXX Beware that the revset breaks if droots is not strictly |
|
622 | # XXX Beware that the revset breaks if droots is not strictly | |
623 | # XXX made of roots; we may want to ensure it is, but that is costly |
|
623 | # XXX made of roots; we may want to ensure it is, but that is costly | |
624 | fallback = list(unfi.set(revset, droots, pushop.fallbackheads)) |
|
624 | fallback = list(unfi.set(revset, droots, pushop.fallbackheads)) | |
625 | if not outgoing.missing: |
|
625 | if not outgoing.missing: | |
626 | future = fallback |
|
626 | future = fallback | |
627 | else: |
|
627 | else: | |
628 | # add the changesets we are going to push as draft |
|
628 | # add the changesets we are going to push as draft | |
629 | # |
|
629 | # | |
630 | # should not be necessary for a publishing server, but because of an |
|
630 | # should not be necessary for a publishing server, but because of an | |
631 | # issue fixed in xxxxx we have to do it anyway. |
|
631 | # issue fixed in xxxxx we have to do it anyway. | |
632 | fdroots = list(unfi.set('roots(%ln + %ln::)', |
|
632 | fdroots = list(unfi.set('roots(%ln + %ln::)', | |
633 | outgoing.missing, droots)) |
|
633 | outgoing.missing, droots)) | |
634 | fdroots = [f.node() for f in fdroots] |
|
634 | fdroots = [f.node() for f in fdroots] | |
635 | future = list(unfi.set(revset, fdroots, pushop.futureheads)) |
|
635 | future = list(unfi.set(revset, fdroots, pushop.futureheads)) | |
636 | pushop.outdatedphases = future |
|
636 | pushop.outdatedphases = future | |
637 | pushop.fallbackoutdatedphases = fallback |
|
637 | pushop.fallbackoutdatedphases = fallback | |
638 |
|
638 | |||
639 | @pushdiscovery('obsmarker') |
|
639 | @pushdiscovery('obsmarker') | |
640 | def _pushdiscoveryobsmarkers(pushop): |
|
640 | def _pushdiscoveryobsmarkers(pushop): | |
641 | if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt) |
|
641 | if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt) | |
642 | and pushop.repo.obsstore |
|
642 | and pushop.repo.obsstore | |
643 | and 'obsolete' in pushop.remote.listkeys('namespaces')): |
|
643 | and 'obsolete' in pushop.remote.listkeys('namespaces')): | |
644 | repo = pushop.repo |
|
644 | repo = pushop.repo | |
645 | # very naive computation, which can be quite expensive on a big repo. |
|
645 | # very naive computation, which can be quite expensive on a big repo. | |
646 | # However: evolution is currently slow on big repos anyway. |
|
646 | # However: evolution is currently slow on big repos anyway. | |
647 | nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads)) |
|
647 | nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads)) | |
648 | pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes) |
|
648 | pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes) | |
649 |
|
649 | |||
650 | @pushdiscovery('bookmarks') |
|
650 | @pushdiscovery('bookmarks') | |
651 | def _pushdiscoverybookmarks(pushop): |
|
651 | def _pushdiscoverybookmarks(pushop): | |
652 | ui = pushop.ui |
|
652 | ui = pushop.ui | |
653 | repo = pushop.repo.unfiltered() |
|
653 | repo = pushop.repo.unfiltered() | |
654 | remote = pushop.remote |
|
654 | remote = pushop.remote | |
655 | ui.debug("checking for updated bookmarks\n") |
|
655 | ui.debug("checking for updated bookmarks\n") | |
656 | ancestors = () |
|
656 | ancestors = () | |
657 | if pushop.revs: |
|
657 | if pushop.revs: | |
658 | revnums = map(repo.changelog.rev, pushop.revs) |
|
658 | revnums = map(repo.changelog.rev, pushop.revs) | |
659 | ancestors = repo.changelog.ancestors(revnums, inclusive=True) |
|
659 | ancestors = repo.changelog.ancestors(revnums, inclusive=True) | |
660 | remotebookmark = remote.listkeys('bookmarks') |
|
660 | remotebookmark = remote.listkeys('bookmarks') | |
661 |
|
661 | |||
662 | explicit = set([repo._bookmarks.expandname(bookmark) |
|
662 | explicit = set([repo._bookmarks.expandname(bookmark) | |
663 | for bookmark in pushop.bookmarks]) |
|
663 | for bookmark in pushop.bookmarks]) | |
664 |
|
664 | |||
665 | remotebookmark = bookmod.unhexlifybookmarks(remotebookmark) |
|
665 | remotebookmark = bookmod.unhexlifybookmarks(remotebookmark) | |
666 | comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark) |
|
666 | comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark) | |
667 |
|
667 | |||
668 | def safehex(x): |
|
668 | def safehex(x): | |
669 | if x is None: |
|
669 | if x is None: | |
670 | return x |
|
670 | return x | |
671 | return hex(x) |
|
671 | return hex(x) | |
672 |
|
672 | |||
673 | def hexifycompbookmarks(bookmarks): |
|
673 | def hexifycompbookmarks(bookmarks): | |
674 | return [(b, safehex(scid), safehex(dcid)) |
|
674 | return [(b, safehex(scid), safehex(dcid)) | |
675 | for (b, scid, dcid) in bookmarks] |
|
675 | for (b, scid, dcid) in bookmarks] | |
676 |
|
676 | |||
677 | comp = [hexifycompbookmarks(marks) for marks in comp] |
|
677 | comp = [hexifycompbookmarks(marks) for marks in comp] | |
678 | return _processcompared(pushop, ancestors, explicit, remotebookmark, comp) |
|
678 | return _processcompared(pushop, ancestors, explicit, remotebookmark, comp) | |
679 |
|
679 | |||
680 | def _processcompared(pushop, pushed, explicit, remotebms, comp): |
|
680 | def _processcompared(pushop, pushed, explicit, remotebms, comp): | |
681 | """take decision on bookmark to pull from the remote bookmark |
|
681 | """take decision on bookmark to pull from the remote bookmark | |
682 |
|
682 | |||
683 | Exists to help extensions that want to alter this behavior. |
|
683 | Exists to help extensions that want to alter this behavior. | |
684 | """ |
|
684 | """ | |
685 | addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp |
|
685 | addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp | |
686 |
|
686 | |||
687 | repo = pushop.repo |
|
687 | repo = pushop.repo | |
688 |
|
688 | |||
689 | for b, scid, dcid in advsrc: |
|
689 | for b, scid, dcid in advsrc: | |
690 | if b in explicit: |
|
690 | if b in explicit: | |
691 | explicit.remove(b) |
|
691 | explicit.remove(b) | |
692 | if not pushed or repo[scid].rev() in pushed: |
|
692 | if not pushed or repo[scid].rev() in pushed: | |
693 | pushop.outbookmarks.append((b, dcid, scid)) |
|
693 | pushop.outbookmarks.append((b, dcid, scid)) | |
694 | # search added bookmark |
|
694 | # search added bookmark | |
695 | for b, scid, dcid in addsrc: |
|
695 | for b, scid, dcid in addsrc: | |
696 | if b in explicit: |
|
696 | if b in explicit: | |
697 | explicit.remove(b) |
|
697 | explicit.remove(b) | |
698 | pushop.outbookmarks.append((b, '', scid)) |
|
698 | pushop.outbookmarks.append((b, '', scid)) | |
699 | # search for overwritten bookmark |
|
699 | # search for overwritten bookmark | |
700 | for b, scid, dcid in list(advdst) + list(diverge) + list(differ): |
|
700 | for b, scid, dcid in list(advdst) + list(diverge) + list(differ): | |
701 | if b in explicit: |
|
701 | if b in explicit: | |
702 | explicit.remove(b) |
|
702 | explicit.remove(b) | |
703 | pushop.outbookmarks.append((b, dcid, scid)) |
|
703 | pushop.outbookmarks.append((b, dcid, scid)) | |
704 | # search for bookmark to delete |
|
704 | # search for bookmark to delete | |
705 | for b, scid, dcid in adddst: |
|
705 | for b, scid, dcid in adddst: | |
706 | if b in explicit: |
|
706 | if b in explicit: | |
707 | explicit.remove(b) |
|
707 | explicit.remove(b) | |
708 | # treat as "deleted locally" |
|
708 | # treat as "deleted locally" | |
709 | pushop.outbookmarks.append((b, dcid, '')) |
|
709 | pushop.outbookmarks.append((b, dcid, '')) | |
710 | # identical bookmarks shouldn't get reported |
|
710 | # identical bookmarks shouldn't get reported | |
711 | for b, scid, dcid in same: |
|
711 | for b, scid, dcid in same: | |
712 | if b in explicit: |
|
712 | if b in explicit: | |
713 | explicit.remove(b) |
|
713 | explicit.remove(b) | |
714 |
|
714 | |||
715 | if explicit: |
|
715 | if explicit: | |
716 | explicit = sorted(explicit) |
|
716 | explicit = sorted(explicit) | |
717 | # we should probably list all of them |
|
717 | # we should probably list all of them | |
718 | pushop.ui.warn(_('bookmark %s does not exist on the local ' |
|
718 | pushop.ui.warn(_('bookmark %s does not exist on the local ' | |
719 | 'or remote repository!\n') % explicit[0]) |
|
719 | 'or remote repository!\n') % explicit[0]) | |
720 | pushop.bkresult = 2 |
|
720 | pushop.bkresult = 2 | |
721 |
|
721 | |||
722 | pushop.outbookmarks.sort() |
|
722 | pushop.outbookmarks.sort() | |
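# Reference note (editorial addition): the eight comparebookmarks()
# categories unpacked above map to push actions roughly as follows:
#
#     addsrc                  -> only exists locally   -> export ('', scid)
#     adddst                  -> only exists remotely  -> delete (dcid, '')
#     advsrc                  -> local side is newer   -> fast-forward update
#     advdst, diverge, differ -> remote side moved     -> overwrite, but only
#                                when the bookmark was pushed explicitly
#     invalid                 -> nodes unknown         -> left untouched
#     same                    -> identical both sides  -> nothing to do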
723 |
|
723 | |||
724 | def _pushcheckoutgoing(pushop): |
|
724 | def _pushcheckoutgoing(pushop): | |
725 | outgoing = pushop.outgoing |
|
725 | outgoing = pushop.outgoing | |
726 | unfi = pushop.repo.unfiltered() |
|
726 | unfi = pushop.repo.unfiltered() | |
727 | if not outgoing.missing: |
|
727 | if not outgoing.missing: | |
728 | # nothing to push |
|
728 | # nothing to push | |
729 | scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded) |
|
729 | scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded) | |
730 | return False |
|
730 | return False | |
731 | # something to push |
|
731 | # something to push | |
732 | if not pushop.force: |
|
732 | if not pushop.force: | |
733 | # if repo.obsstore is empty --> no obsolete changesets; |
|
733 | # if repo.obsstore is empty --> no obsolete changesets; | |
734 | # skip the iteration in that case |
|
734 | # skip the iteration in that case | |
735 | if unfi.obsstore: |
|
735 | if unfi.obsstore: | |
736 | # these messages are defined here to respect the 80-char limit |
|
736 | # these messages are defined here to respect the 80-char limit | |
737 | mso = _("push includes obsolete changeset: %s!") |
|
737 | mso = _("push includes obsolete changeset: %s!") | |
738 | mspd = _("push includes phase-divergent changeset: %s!") |
|
738 | mspd = _("push includes phase-divergent changeset: %s!") | |
739 | mscd = _("push includes content-divergent changeset: %s!") |
|
739 | mscd = _("push includes content-divergent changeset: %s!") | |
740 | mst = {"orphan": _("push includes orphan changeset: %s!"), |
|
740 | mst = {"orphan": _("push includes orphan changeset: %s!"), | |
741 | "phase-divergent": mspd, |
|
741 | "phase-divergent": mspd, | |
742 | "content-divergent": mscd} |
|
742 | "content-divergent": mscd} | |
743 | # If we are to push and there is at least one |
|
743 | # If we are to push and there is at least one | |
744 | # obsolete or unstable changeset in missing, then at |
|
744 | # obsolete or unstable changeset in missing, then at | |
745 | # least one of the missing heads will be obsolete or |
|
745 | # least one of the missing heads will be obsolete or | |
746 | # unstable. So checking heads only is ok. |
|
746 | # unstable. So checking heads only is ok. | |
747 | for node in outgoing.missingheads: |
|
747 | for node in outgoing.missingheads: | |
748 | ctx = unfi[node] |
|
748 | ctx = unfi[node] | |
749 | if ctx.obsolete(): |
|
749 | if ctx.obsolete(): | |
750 | raise error.Abort(mso % ctx) |
|
750 | raise error.Abort(mso % ctx) | |
751 | elif ctx.isunstable(): |
|
751 | elif ctx.isunstable(): | |
752 | # TODO print more than one instability in the abort |
|
752 | # TODO print more than one instability in the abort | |
753 | # message |
|
753 | # message | |
754 | raise error.Abort(mst[ctx.instabilities()[0]] % ctx) |
|
754 | raise error.Abort(mst[ctx.instabilities()[0]] % ctx) | |
755 |
|
755 | |||
756 | discovery.checkheads(pushop) |
|
756 | discovery.checkheads(pushop) | |
757 | return True |
|
757 | return True | |
758 |
|
758 | |||
759 | # List of names of steps to perform for an outgoing bundle2, order matters. |
|
759 | # List of names of steps to perform for an outgoing bundle2, order matters. | |
760 | b2partsgenorder = [] |
|
760 | b2partsgenorder = [] | |
761 |
|
761 | |||
762 | # Mapping between step name and function |
|
762 | # Mapping between step name and function | |
763 | # |
|
763 | # | |
764 | # This exists to help extensions wrap steps if necessary |
|
764 | # This exists to help extensions wrap steps if necessary | |
765 | b2partsgenmapping = {} |
|
765 | b2partsgenmapping = {} | |
766 |
|
766 | |||
767 | def b2partsgenerator(stepname, idx=None): |
|
767 | def b2partsgenerator(stepname, idx=None): | |
768 | """decorator for function generating bundle2 part |
|
768 | """decorator for function generating bundle2 part | |
769 |
|
769 | |||
770 | The function is added to the step -> function mapping and appended to the |
|
770 | The function is added to the step -> function mapping and appended to the | |
771 | list of steps. Beware that decorated functions will be added in order |
|
771 | list of steps. Beware that decorated functions will be added in order | |
772 | (this may matter). |
|
772 | (this may matter). | |
773 |
|
773 | |||
774 | You can only use this decorator for new steps; if you want to wrap a step |
|
774 | You can only use this decorator for new steps; if you want to wrap a step | |
775 | from an extension, change the b2partsgenmapping dictionary directly.""" |
|
775 | from an extension, change the b2partsgenmapping dictionary directly.""" | |
776 | def dec(func): |
|
776 | def dec(func): | |
777 | assert stepname not in b2partsgenmapping |
|
777 | assert stepname not in b2partsgenmapping | |
778 | b2partsgenmapping[stepname] = func |
|
778 | b2partsgenmapping[stepname] = func | |
779 | if idx is None: |
|
779 | if idx is None: | |
780 | b2partsgenorder.append(stepname) |
|
780 | b2partsgenorder.append(stepname) | |
781 | else: |
|
781 | else: | |
782 | b2partsgenorder.insert(idx, stepname) |
|
782 | b2partsgenorder.insert(idx, stepname) | |
783 | return func |
|
783 | return func | |
784 | return dec |
|
784 | return dec | |
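# Sketch (illustration only): a part generator added by an extension with
# the decorator above, following the same contract as the built-in
# generators below (check stepsdone, add a part, optionally return a reply
# handler). The step name 'myext' and part name 'x-myext-data' are
# hypothetical.
#
#     @b2partsgenerator('myext')
#     def _pushb2myext(pushop, bundler):
#         if 'myext' in pushop.stepsdone:
#             return
#         pushop.stepsdone.add('myext')
#         bundler.newpart('x-myext-data', data='payload')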
785 |
|
785 | |||
786 | def _pushb2ctxcheckheads(pushop, bundler): |
|
786 | def _pushb2ctxcheckheads(pushop, bundler): | |
787 | """Generate race condition checking parts |
|
787 | """Generate race condition checking parts | |
788 |
|
788 | |||
789 | Exists as an independent function to aid extensions |
|
789 | Exists as an independent function to aid extensions | |
790 | """ |
|
790 | """ | |
791 | # * 'force' does not check for push races, |
|
791 | # * 'force' does not check for push races, | |
792 | # * if we don't push anything, there is nothing to check. |
|
792 | # * if we don't push anything, there is nothing to check. | |
793 | if not pushop.force and pushop.outgoing.missingheads: |
|
793 | if not pushop.force and pushop.outgoing.missingheads: | |
794 | allowunrelated = 'related' in bundler.capabilities.get('checkheads', ()) |
|
794 | allowunrelated = 'related' in bundler.capabilities.get('checkheads', ()) | |
795 | emptyremote = pushop.pushbranchmap is None |
|
795 | emptyremote = pushop.pushbranchmap is None | |
796 | if not allowunrelated or emptyremote: |
|
796 | if not allowunrelated or emptyremote: | |
797 | bundler.newpart('check:heads', data=iter(pushop.remoteheads)) |
|
797 | bundler.newpart('check:heads', data=iter(pushop.remoteheads)) | |
798 | else: |
|
798 | else: | |
799 | affected = set() |
|
799 | affected = set() | |
800 | for branch, heads in pushop.pushbranchmap.iteritems(): |
|
800 | for branch, heads in pushop.pushbranchmap.iteritems(): | |
801 | remoteheads, newheads, unsyncedheads, discardedheads = heads |
|
801 | remoteheads, newheads, unsyncedheads, discardedheads = heads | |
802 | if remoteheads is not None: |
|
802 | if remoteheads is not None: | |
803 | remote = set(remoteheads) |
|
803 | remote = set(remoteheads) | |
804 | affected |= set(discardedheads) & remote |
|
804 | affected |= set(discardedheads) & remote | |
805 | affected |= remote - set(newheads) |
|
805 | affected |= remote - set(newheads) | |
806 | if affected: |
|
806 | if affected: | |
807 | data = iter(sorted(affected)) |
|
807 | data = iter(sorted(affected)) | |
808 | bundler.newpart('check:updated-heads', data=data) |
|
808 | bundler.newpart('check:updated-heads', data=data) | |
809 |
|
809 | |||
810 | def _pushing(pushop): |
|
810 | def _pushing(pushop): | |
811 | """return True if we are pushing anything""" |
|
811 | """return True if we are pushing anything""" | |
812 | return bool(pushop.outgoing.missing |
|
812 | return bool(pushop.outgoing.missing | |
813 | or pushop.outdatedphases |
|
813 | or pushop.outdatedphases | |
814 | or pushop.outobsmarkers |
|
814 | or pushop.outobsmarkers | |
815 | or pushop.outbookmarks) |
|
815 | or pushop.outbookmarks) | |
816 |
|
816 | |||
817 | @b2partsgenerator('check-bookmarks') |
|
817 | @b2partsgenerator('check-bookmarks') | |
818 | def _pushb2checkbookmarks(pushop, bundler): |
|
818 | def _pushb2checkbookmarks(pushop, bundler): | |
819 | """insert bookmark move checking""" |
|
819 | """insert bookmark move checking""" | |
820 | if not _pushing(pushop) or pushop.force: |
|
820 | if not _pushing(pushop) or pushop.force: | |
821 | return |
|
821 | return | |
822 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
822 | b2caps = bundle2.bundle2caps(pushop.remote) | |
823 | hasbookmarkcheck = 'bookmarks' in b2caps |
|
823 | hasbookmarkcheck = 'bookmarks' in b2caps | |
824 | if not (pushop.outbookmarks and hasbookmarkcheck): |
|
824 | if not (pushop.outbookmarks and hasbookmarkcheck): | |
825 | return |
|
825 | return | |
826 | data = [] |
|
826 | data = [] | |
827 | for book, old, new in pushop.outbookmarks: |
|
827 | for book, old, new in pushop.outbookmarks: | |
828 | old = bin(old) |
|
828 | old = bin(old) | |
829 | data.append((book, old)) |
|
829 | data.append((book, old)) | |
830 | checkdata = bookmod.binaryencode(data) |
|
830 | checkdata = bookmod.binaryencode(data) | |
831 | bundler.newpart('check:bookmarks', data=checkdata) |
|
831 | bundler.newpart('check:bookmarks', data=checkdata) | |
832 |
|
832 | |||
833 | @b2partsgenerator('check-phases') |
|
833 | @b2partsgenerator('check-phases') | |
834 | def _pushb2checkphases(pushop, bundler): |
|
834 | def _pushb2checkphases(pushop, bundler): | |
835 | """insert phase move checking""" |
|
835 | """insert phase move checking""" | |
836 | if not _pushing(pushop) or pushop.force: |
|
836 | if not _pushing(pushop) or pushop.force: | |
837 | return |
|
837 | return | |
838 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
838 | b2caps = bundle2.bundle2caps(pushop.remote) | |
839 | hasphaseheads = 'heads' in b2caps.get('phases', ()) |
|
839 | hasphaseheads = 'heads' in b2caps.get('phases', ()) | |
840 | if pushop.remotephases is not None and hasphaseheads: |
|
840 | if pushop.remotephases is not None and hasphaseheads: | |
841 | # check that the remote phase has not changed |
|
841 | # check that the remote phase has not changed | |
842 | checks = [[] for p in phases.allphases] |
|
842 | checks = [[] for p in phases.allphases] | |
843 | checks[phases.public].extend(pushop.remotephases.publicheads) |
|
843 | checks[phases.public].extend(pushop.remotephases.publicheads) | |
844 | checks[phases.draft].extend(pushop.remotephases.draftroots) |
|
844 | checks[phases.draft].extend(pushop.remotephases.draftroots) | |
845 | if any(checks): |
|
845 | if any(checks): | |
846 | for nodes in checks: |
|
846 | for nodes in checks: | |
847 | nodes.sort() |
|
847 | nodes.sort() | |
848 | checkdata = phases.binaryencode(checks) |
|
848 | checkdata = phases.binaryencode(checks) | |
849 | bundler.newpart('check:phases', data=checkdata) |
|
849 | bundler.newpart('check:phases', data=checkdata) | |
850 |
|
850 | |||
851 | @b2partsgenerator('changeset') |
|
851 | @b2partsgenerator('changeset') | |
852 | def _pushb2ctx(pushop, bundler): |
|
852 | def _pushb2ctx(pushop, bundler): | |
853 | """handle changegroup push through bundle2 |
|
853 | """handle changegroup push through bundle2 | |
854 |
|
854 | |||
855 | addchangegroup result is stored in the ``pushop.cgresult`` attribute. |
|
855 | addchangegroup result is stored in the ``pushop.cgresult`` attribute. | |
856 | """ |
|
856 | """ | |
857 | if 'changesets' in pushop.stepsdone: |
|
857 | if 'changesets' in pushop.stepsdone: | |
858 | return |
|
858 | return | |
859 | pushop.stepsdone.add('changesets') |
|
859 | pushop.stepsdone.add('changesets') | |
860 | # Send known heads to the server for race detection. |
|
860 | # Send known heads to the server for race detection. | |
861 | if not _pushcheckoutgoing(pushop): |
|
861 | if not _pushcheckoutgoing(pushop): | |
862 | return |
|
862 | return | |
863 | pushop.repo.prepushoutgoinghooks(pushop) |
|
863 | pushop.repo.prepushoutgoinghooks(pushop) | |
864 |
|
864 | |||
865 | _pushb2ctxcheckheads(pushop, bundler) |
|
865 | _pushb2ctxcheckheads(pushop, bundler) | |
866 |
|
866 | |||
867 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
867 | b2caps = bundle2.bundle2caps(pushop.remote) | |
868 | version = '01' |
|
868 | version = '01' | |
869 | cgversions = b2caps.get('changegroup') |
|
869 | cgversions = b2caps.get('changegroup') | |
870 | if cgversions: # 3.1 and 3.2 ship with an empty value |
|
870 | if cgversions: # 3.1 and 3.2 ship with an empty value | |
871 | cgversions = [v for v in cgversions |
|
871 | cgversions = [v for v in cgversions | |
872 | if v in changegroup.supportedoutgoingversions( |
|
872 | if v in changegroup.supportedoutgoingversions( | |
873 | pushop.repo)] |
|
873 | pushop.repo)] | |
874 | if not cgversions: |
|
874 | if not cgversions: | |
875 | raise ValueError(_('no common changegroup version')) |
|
875 | raise ValueError(_('no common changegroup version')) | |
876 | version = max(cgversions) |
|
876 | version = max(cgversions) | |
877 | cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version, |
|
877 | cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version, | |
878 | 'push') |
|
878 | 'push') | |
879 | cgpart = bundler.newpart('changegroup', data=cgstream) |
|
879 | cgpart = bundler.newpart('changegroup', data=cgstream) | |
880 | if cgversions: |
|
880 | if cgversions: | |
881 | cgpart.addparam('version', version) |
|
881 | cgpart.addparam('version', version) | |
882 | if 'treemanifest' in pushop.repo.requirements: |
|
882 | if 'treemanifest' in pushop.repo.requirements: | |
883 | cgpart.addparam('treemanifest', '1') |
|
883 | cgpart.addparam('treemanifest', '1') | |
884 | def handlereply(op): |
|
884 | def handlereply(op): | |
885 | """extract addchangegroup returns from server reply""" |
|
885 | """extract addchangegroup returns from server reply""" | |
886 | cgreplies = op.records.getreplies(cgpart.id) |
|
886 | cgreplies = op.records.getreplies(cgpart.id) | |
887 | assert len(cgreplies['changegroup']) == 1 |
|
887 | assert len(cgreplies['changegroup']) == 1 | |
888 | pushop.cgresult = cgreplies['changegroup'][0]['return'] |
|
888 | pushop.cgresult = cgreplies['changegroup'][0]['return'] | |
889 | return handlereply |
|
889 | return handlereply | |
890 |
|
890 | |||
891 | @b2partsgenerator('phase') |
|
891 | @b2partsgenerator('phase') | |
892 | def _pushb2phases(pushop, bundler): |
|
892 | def _pushb2phases(pushop, bundler): | |
893 | """handle phase push through bundle2""" |
|
893 | """handle phase push through bundle2""" | |
894 | if 'phases' in pushop.stepsdone: |
|
894 | if 'phases' in pushop.stepsdone: | |
895 | return |
|
895 | return | |
896 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
896 | b2caps = bundle2.bundle2caps(pushop.remote) | |
897 | ui = pushop.repo.ui |
|
897 | ui = pushop.repo.ui | |
898 |
|
898 | |||
899 | legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange') |
|
899 | legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange') | |
900 | haspushkey = 'pushkey' in b2caps |
|
900 | haspushkey = 'pushkey' in b2caps | |
901 | hasphaseheads = 'heads' in b2caps.get('phases', ()) |
|
901 | hasphaseheads = 'heads' in b2caps.get('phases', ()) | |
902 |
|
902 | |||
903 | if hasphaseheads and not legacyphase: |
|
903 | if hasphaseheads and not legacyphase: | |
904 | return _pushb2phaseheads(pushop, bundler) |
|
904 | return _pushb2phaseheads(pushop, bundler) | |
905 | elif haspushkey: |
|
905 | elif haspushkey: | |
906 | return _pushb2phasespushkey(pushop, bundler) |
|
906 | return _pushb2phasespushkey(pushop, bundler) | |
907 |
|
907 | |||
908 | def _pushb2phaseheads(pushop, bundler): |
|
908 | def _pushb2phaseheads(pushop, bundler): | |
909 | """push phase information through a bundle2 - binary part""" |
|
909 | """push phase information through a bundle2 - binary part""" | |
910 | pushop.stepsdone.add('phases') |
|
910 | pushop.stepsdone.add('phases') | |
911 | if pushop.outdatedphases: |
|
911 | if pushop.outdatedphases: | |
912 | updates = [[] for p in phases.allphases] |
|
912 | updates = [[] for p in phases.allphases] | |
913 | updates[0].extend(h.node() for h in pushop.outdatedphases) |
|
913 | updates[0].extend(h.node() for h in pushop.outdatedphases) | |
914 | phasedata = phases.binaryencode(updates) |
|
914 | phasedata = phases.binaryencode(updates) | |
915 | bundler.newpart('phase-heads', data=phasedata) |
|
915 | bundler.newpart('phase-heads', data=phasedata) | |
916 |
|
916 | |||
917 | def _pushb2phasespushkey(pushop, bundler): |
|
917 | def _pushb2phasespushkey(pushop, bundler): | |
918 | """push phase information through a bundle2 - pushkey part""" |
|
918 | """push phase information through a bundle2 - pushkey part""" | |
919 | pushop.stepsdone.add('phases') |
|
919 | pushop.stepsdone.add('phases') | |
920 | part2node = [] |
|
920 | part2node = [] | |
921 |
|
921 | |||
922 | def handlefailure(pushop, exc): |
|
922 | def handlefailure(pushop, exc): | |
923 | targetid = int(exc.partid) |
|
923 | targetid = int(exc.partid) | |
924 | for partid, node in part2node: |
|
924 | for partid, node in part2node: | |
925 | if partid == targetid: |
|
925 | if partid == targetid: | |
926 | raise error.Abort(_('updating %s to public failed') % node) |
|
926 | raise error.Abort(_('updating %s to public failed') % node) | |
927 |
|
927 | |||
928 | enc = pushkey.encode |
|
928 | enc = pushkey.encode | |
929 | for newremotehead in pushop.outdatedphases: |
|
929 | for newremotehead in pushop.outdatedphases: | |
930 | part = bundler.newpart('pushkey') |
|
930 | part = bundler.newpart('pushkey') | |
931 | part.addparam('namespace', enc('phases')) |
|
931 | part.addparam('namespace', enc('phases')) | |
932 | part.addparam('key', enc(newremotehead.hex())) |
|
932 | part.addparam('key', enc(newremotehead.hex())) | |
933 | part.addparam('old', enc('%d' % phases.draft)) |
|
933 | part.addparam('old', enc('%d' % phases.draft)) | |
934 | part.addparam('new', enc('%d' % phases.public)) |
|
934 | part.addparam('new', enc('%d' % phases.public)) | |
935 | part2node.append((part.id, newremotehead)) |
|
935 | part2node.append((part.id, newremotehead)) | |
936 | pushop.pkfailcb[part.id] = handlefailure |
|
936 | pushop.pkfailcb[part.id] = handlefailure | |
937 |
|
937 | |||
938 | def handlereply(op): |
|
938 | def handlereply(op): | |
939 | for partid, node in part2node: |
|
939 | for partid, node in part2node: | |
940 | partrep = op.records.getreplies(partid) |
|
940 | partrep = op.records.getreplies(partid) | |
941 | results = partrep['pushkey'] |
|
941 | results = partrep['pushkey'] | |
942 | assert len(results) <= 1 |
|
942 | assert len(results) <= 1 | |
943 | msg = None |
|
943 | msg = None | |
944 | if not results: |
|
944 | if not results: | |
945 | msg = _('server ignored update of %s to public!\n') % node |
|
945 | msg = _('server ignored update of %s to public!\n') % node | |
946 | elif not int(results[0]['return']): |
|
946 | elif not int(results[0]['return']): | |
947 | msg = _('updating %s to public failed!\n') % node |
|
947 | msg = _('updating %s to public failed!\n') % node | |
948 | if msg is not None: |
|
948 | if msg is not None: | |
949 | pushop.ui.warn(msg) |
|
949 | pushop.ui.warn(msg) | |
950 | return handlereply |
|
950 | return handlereply | |
951 |
|
951 | |||
952 | @b2partsgenerator('obsmarkers') |
|
952 | @b2partsgenerator('obsmarkers') | |
953 | def _pushb2obsmarkers(pushop, bundler): |
|
953 | def _pushb2obsmarkers(pushop, bundler): | |
954 | if 'obsmarkers' in pushop.stepsdone: |
|
954 | if 'obsmarkers' in pushop.stepsdone: | |
955 | return |
|
955 | return | |
956 | remoteversions = bundle2.obsmarkersversion(bundler.capabilities) |
|
956 | remoteversions = bundle2.obsmarkersversion(bundler.capabilities) | |
957 | if obsolete.commonversion(remoteversions) is None: |
|
957 | if obsolete.commonversion(remoteversions) is None: | |
958 | return |
|
958 | return | |
959 | pushop.stepsdone.add('obsmarkers') |
|
959 | pushop.stepsdone.add('obsmarkers') | |
960 | if pushop.outobsmarkers: |
|
960 | if pushop.outobsmarkers: | |
961 | markers = sorted(pushop.outobsmarkers) |
|
961 | markers = sorted(pushop.outobsmarkers) | |
962 | bundle2.buildobsmarkerspart(bundler, markers) |
|
962 | bundle2.buildobsmarkerspart(bundler, markers) | |
963 |
|
963 | |||
964 | @b2partsgenerator('bookmarks') |
|
964 | @b2partsgenerator('bookmarks') | |
965 | def _pushb2bookmarks(pushop, bundler): |
|
965 | def _pushb2bookmarks(pushop, bundler): | |
966 | """handle bookmark push through bundle2""" |
|
966 | """handle bookmark push through bundle2""" | |
967 | if 'bookmarks' in pushop.stepsdone: |
|
967 | if 'bookmarks' in pushop.stepsdone: | |
968 | return |
|
968 | return | |
969 | b2caps = bundle2.bundle2caps(pushop.remote) |
|
969 | b2caps = bundle2.bundle2caps(pushop.remote) | |
970 |
|
970 | |||
971 | legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange') |
|
971 | legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange') | |
972 | legacybooks = 'bookmarks' in legacy |
|
972 | legacybooks = 'bookmarks' in legacy | |
973 |
|
973 | |||
974 | if not legacybooks and 'bookmarks' in b2caps: |
|
974 | if not legacybooks and 'bookmarks' in b2caps: | |
975 | return _pushb2bookmarkspart(pushop, bundler) |
|
975 | return _pushb2bookmarkspart(pushop, bundler) | |
976 | elif 'pushkey' in b2caps: |
|
976 | elif 'pushkey' in b2caps: | |
977 | return _pushb2bookmarkspushkey(pushop, bundler) |
|
977 | return _pushb2bookmarkspushkey(pushop, bundler) | |
978 |
|
978 | |||
979 | def _bmaction(old, new): |
|
979 | def _bmaction(old, new): | |
980 | """small utility for bookmark pushing""" |
|
980 | """small utility for bookmark pushing""" | |
981 | if not old: |
|
981 | if not old: | |
982 | return 'export' |
|
982 | return 'export' | |
983 | elif not new: |
|
983 | elif not new: | |
984 | return 'delete' |
|
984 | return 'delete' | |
985 | return 'update' |
|
985 | return 'update' | |
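# Worked examples (editorial) for _bmaction, with hex node ids abbreviated:
#
#     _bmaction('', 'ab12...')        -> 'export'  (new on the remote)
#     _bmaction('ab12...', '')        -> 'delete'
#     _bmaction('ab12...', 'cd34...') -> 'update'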
986 |
|
986 | |||
987 | def _pushb2bookmarkspart(pushop, bundler): |
|
987 | def _pushb2bookmarkspart(pushop, bundler): | |
988 | pushop.stepsdone.add('bookmarks') |
|
988 | pushop.stepsdone.add('bookmarks') | |
989 | if not pushop.outbookmarks: |
|
989 | if not pushop.outbookmarks: | |
990 | return |
|
990 | return | |
991 |
|
991 | |||
992 | allactions = [] |
|
992 | allactions = [] | |
993 | data = [] |
|
993 | data = [] | |
994 | for book, old, new in pushop.outbookmarks: |
|
994 | for book, old, new in pushop.outbookmarks: | |
995 | new = bin(new) |
|
995 | new = bin(new) | |
996 | data.append((book, new)) |
|
996 | data.append((book, new)) | |
997 | allactions.append((book, _bmaction(old, new))) |
|
997 | allactions.append((book, _bmaction(old, new))) | |
998 | checkdata = bookmod.binaryencode(data) |
|
998 | checkdata = bookmod.binaryencode(data) | |
999 | bundler.newpart('bookmarks', data=checkdata) |
|
999 | bundler.newpart('bookmarks', data=checkdata) | |
1000 |
|
1000 | |||
1001 | def handlereply(op): |
|
1001 | def handlereply(op): | |
1002 | ui = pushop.ui |
|
1002 | ui = pushop.ui | |
1003 | # if success |
|
1003 | # if success | |
1004 | for book, action in allactions: |
|
1004 | for book, action in allactions: | |
1005 | ui.status(bookmsgmap[action][0] % book) |
|
1005 | ui.status(bookmsgmap[action][0] % book) | |
1006 |
|
1006 | |||
1007 | return handlereply |
|
1007 | return handlereply | |
1008 |
|
1008 | |||
1009 | def _pushb2bookmarkspushkey(pushop, bundler): |
|
1009 | def _pushb2bookmarkspushkey(pushop, bundler): | |
1010 | pushop.stepsdone.add('bookmarks') |
|
1010 | pushop.stepsdone.add('bookmarks') | |
1011 | part2book = [] |
|
1011 | part2book = [] | |
1012 | enc = pushkey.encode |
|
1012 | enc = pushkey.encode | |
1013 |
|
1013 | |||
1014 | def handlefailure(pushop, exc): |
|
1014 | def handlefailure(pushop, exc): | |
1015 | targetid = int(exc.partid) |
|
1015 | targetid = int(exc.partid) | |
1016 | for partid, book, action in part2book: |
|
1016 | for partid, book, action in part2book: | |
1017 | if partid == targetid: |
|
1017 | if partid == targetid: | |
1018 | raise error.Abort(bookmsgmap[action][1].rstrip() % book) |
|
1018 | raise error.Abort(bookmsgmap[action][1].rstrip() % book) | |
1019 | # we should not be called for a part we did not generate |
|
1019 | # we should not be called for a part we did not generate | |
1020 | assert False |
|
1020 | assert False | |
1021 |
|
1021 | |||
1022 | for book, old, new in pushop.outbookmarks: |
|
1022 | for book, old, new in pushop.outbookmarks: | |
1023 | part = bundler.newpart('pushkey') |
|
1023 | part = bundler.newpart('pushkey') | |
1024 | part.addparam('namespace', enc('bookmarks')) |
|
1024 | part.addparam('namespace', enc('bookmarks')) | |
1025 | part.addparam('key', enc(book)) |
|
1025 | part.addparam('key', enc(book)) | |
1026 | part.addparam('old', enc(old)) |
|
1026 | part.addparam('old', enc(old)) | |
1027 | part.addparam('new', enc(new)) |
|
1027 | part.addparam('new', enc(new)) | |
1028 | action = 'update' |
|
1028 | action = 'update' | |
1029 | if not old: |
|
1029 | if not old: | |
1030 | action = 'export' |
|
1030 | action = 'export' | |
1031 | elif not new: |
|
1031 | elif not new: | |
1032 | action = 'delete' |
|
1032 | action = 'delete' | |
1033 | part2book.append((part.id, book, action)) |
|
1033 | part2book.append((part.id, book, action)) | |
1034 | pushop.pkfailcb[part.id] = handlefailure |
|
1034 | pushop.pkfailcb[part.id] = handlefailure | |
1035 |
|
1035 | |||
1036 | def handlereply(op): |
|
1036 | def handlereply(op): | |
1037 | ui = pushop.ui |
|
1037 | ui = pushop.ui | |
1038 | for partid, book, action in part2book: |
|
1038 | for partid, book, action in part2book: | |
1039 | partrep = op.records.getreplies(partid) |
|
1039 | partrep = op.records.getreplies(partid) | |
1040 | results = partrep['pushkey'] |
|
1040 | results = partrep['pushkey'] | |
1041 | assert len(results) <= 1 |
|
1041 | assert len(results) <= 1 | |
1042 | if not results: |
|
1042 | if not results: | |
1043 | pushop.ui.warn(_('server ignored bookmark %s update\n') % book) |
|
1043 | pushop.ui.warn(_('server ignored bookmark %s update\n') % book) | |
1044 | else: |
|
1044 | else: | |
1045 | ret = int(results[0]['return']) |
|
1045 | ret = int(results[0]['return']) | |
1046 | if ret: |
|
1046 | if ret: | |
1047 | ui.status(bookmsgmap[action][0] % book) |
|
1047 | ui.status(bookmsgmap[action][0] % book) | |
1048 | else: |
|
1048 | else: | |
1049 | ui.warn(bookmsgmap[action][1] % book) |
|
1049 | ui.warn(bookmsgmap[action][1] % book) | |
1050 | if pushop.bkresult is not None: |
|
1050 | if pushop.bkresult is not None: | |
1051 | pushop.bkresult = 1 |
|
1051 | pushop.bkresult = 1 | |
1052 | return handlereply |
|
1052 | return handlereply | |
1053 |
|
1053 | |||
1054 | @b2partsgenerator('pushvars', idx=0) |
|
1054 | @b2partsgenerator('pushvars', idx=0) | |
1055 | def _getbundlesendvars(pushop, bundler): |
|
1055 | def _getbundlesendvars(pushop, bundler): | |
1056 | '''send shellvars via bundle2''' |
|
1056 | '''send shellvars via bundle2''' | |
1057 | pushvars = pushop.pushvars |
|
1057 | pushvars = pushop.pushvars | |
1058 | if pushvars: |
|
1058 | if pushvars: | |
1059 | shellvars = {} |
|
1059 | shellvars = {} | |
1060 | for raw in pushvars: |
|
1060 | for raw in pushvars: | |
1061 | if '=' not in raw: |
|
1061 | if '=' not in raw: | |
1062 | msg = ("unable to parse variable '%s', should follow " |
|
1062 | msg = ("unable to parse variable '%s', should follow " | |
1063 | "'KEY=VALUE' or 'KEY=' format") |
|
1063 | "'KEY=VALUE' or 'KEY=' format") | |
1064 | raise error.Abort(msg % raw) |
|
1064 | raise error.Abort(msg % raw) | |
1065 | k, v = raw.split('=', 1) |
|
1065 | k, v = raw.split('=', 1) | |
1066 | shellvars[k] = v |
|
1066 | shellvars[k] = v | |
1067 |
|
1067 | |||
1068 | part = bundler.newpart('pushvars') |
|
1068 | part = bundler.newpart('pushvars') | |
1069 |
|
1069 | |||
1070 | for key, value in shellvars.iteritems(): |
|
1070 | for key, value in shellvars.iteritems(): | |
1071 | part.addparam(key, value, mandatory=False) |
|
1071 | part.addparam(key, value, mandatory=False) | |
1072 |
|
1072 | |||
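# Usage sketch (editorial, not part of this change): the variables parsed
# above come from the command line, e.g.
#
#     hg push --pushvars "DEBUG=1" --pushvars "NOTIFY="
#
# Each KEY=VALUE pair is attached to the 'pushvars' part as an advisory
# parameter; an entry without '=' aborts with the message above.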
1073 | def _pushbundle2(pushop): |
|
1073 | def _pushbundle2(pushop): | |
1074 | """push data to the remote using bundle2 |
|
1074 | """push data to the remote using bundle2 | |
1075 |
|
1075 | |||
1076 | The only currently supported type of data is changegroup but this will |
|
1076 | The only currently supported type of data is changegroup but this will | |
1077 | evolve in the future.""" |
|
1077 | evolve in the future.""" | |
1078 | bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote)) |
|
1078 | bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote)) | |
1079 | pushback = (pushop.trmanager |
|
1079 | pushback = (pushop.trmanager | |
1080 | and pushop.ui.configbool('experimental', 'bundle2.pushback')) |
|
1080 | and pushop.ui.configbool('experimental', 'bundle2.pushback')) | |
1081 |
|
1081 | |||
1082 | # create reply capability |
|
1082 | # create reply capability | |
1083 | capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo, |
|
1083 | capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo, | |
1084 | allowpushback=pushback, |
|
1084 | allowpushback=pushback, | |
1085 | role='client')) |
|
1085 | role='client')) | |
1086 | bundler.newpart('replycaps', data=capsblob) |
|
1086 | bundler.newpart('replycaps', data=capsblob) | |
1087 | replyhandlers = [] |
|
1087 | replyhandlers = [] | |
1088 | for partgenname in b2partsgenorder: |
|
1088 | for partgenname in b2partsgenorder: | |
1089 | partgen = b2partsgenmapping[partgenname] |
|
1089 | partgen = b2partsgenmapping[partgenname] | |
1090 | ret = partgen(pushop, bundler) |
|
1090 | ret = partgen(pushop, bundler) | |
1091 | if callable(ret): |
|
1091 | if callable(ret): | |
1092 | replyhandlers.append(ret) |
|
1092 | replyhandlers.append(ret) | |
1093 | # do not push if nothing to push |
|
1093 | # do not push if nothing to push | |
1094 | if bundler.nbparts <= 1: |
|
1094 | if bundler.nbparts <= 1: | |
1095 | return |
|
1095 | return | |
1096 | stream = util.chunkbuffer(bundler.getchunks()) |
|
1096 | stream = util.chunkbuffer(bundler.getchunks()) | |
1097 | try: |
|
1097 | try: | |
1098 | try: |
|
1098 | try: | |
1099 | with pushop.remote.commandexecutor() as e: |
|
1099 | with pushop.remote.commandexecutor() as e: | |
1100 | reply = e.callcommand('unbundle', { |
|
1100 | reply = e.callcommand('unbundle', { | |
1101 | 'bundle': stream, |
|
1101 | 'bundle': stream, | |
1102 | 'heads': ['force'], |
|
1102 | 'heads': ['force'], | |
1103 | 'url': pushop.remote.url(), |
|
1103 | 'url': pushop.remote.url(), | |
1104 | }).result() |
|
1104 | }).result() | |
1105 | except error.BundleValueError as exc: |
|
1105 | except error.BundleValueError as exc: | |
1106 | raise error.Abort(_('missing support for %s') % exc) |
|
1106 | raise error.Abort(_('missing support for %s') % exc) | |
1107 | try: |
|
1107 | try: | |
1108 | trgetter = None |
|
1108 | trgetter = None | |
1109 | if pushback: |
|
1109 | if pushback: | |
1110 | trgetter = pushop.trmanager.transaction |
|
1110 | trgetter = pushop.trmanager.transaction | |
1111 | op = bundle2.processbundle(pushop.repo, reply, trgetter) |
|
1111 | op = bundle2.processbundle(pushop.repo, reply, trgetter) | |
1112 | except error.BundleValueError as exc: |
|
1112 | except error.BundleValueError as exc: | |
1113 | raise error.Abort(_('missing support for %s') % exc) |
|
1113 | raise error.Abort(_('missing support for %s') % exc) | |
1114 | except bundle2.AbortFromPart as exc: |
|
1114 | except bundle2.AbortFromPart as exc: | |
1115 | pushop.ui.status(_('remote: %s\n') % exc) |
|
1115 | pushop.ui.status(_('remote: %s\n') % exc) | |
1116 | if exc.hint is not None: |
|
1116 | if exc.hint is not None: | |
1117 | pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint)) |
|
1117 | pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint)) | |
1118 | raise error.Abort(_('push failed on remote')) |
|
1118 | raise error.Abort(_('push failed on remote')) | |
1119 | except error.PushkeyFailed as exc: |
|
1119 | except error.PushkeyFailed as exc: | |
1120 | partid = int(exc.partid) |
|
1120 | partid = int(exc.partid) | |
1121 | if partid not in pushop.pkfailcb: |
|
1121 | if partid not in pushop.pkfailcb: | |
1122 | raise |
|
1122 | raise | |
1123 | pushop.pkfailcb[partid](pushop, exc) |
|
1123 | pushop.pkfailcb[partid](pushop, exc) | |
1124 | for rephand in replyhandlers: |
|
1124 | for rephand in replyhandlers: | |
1125 | rephand(op) |
|
1125 | rephand(op) | |
1126 |
|
1126 | |||
1127 | def _pushchangeset(pushop): |
|
1127 | def _pushchangeset(pushop): | |
1128 | """Make the actual push of changeset bundle to remote repo""" |
|
1128 | """Make the actual push of changeset bundle to remote repo""" | |
1129 | if 'changesets' in pushop.stepsdone: |
|
1129 | if 'changesets' in pushop.stepsdone: | |
1130 | return |
|
1130 | return | |
1131 | pushop.stepsdone.add('changesets') |
|
1131 | pushop.stepsdone.add('changesets') | |
1132 | if not _pushcheckoutgoing(pushop): |
|
1132 | if not _pushcheckoutgoing(pushop): | |
1133 | return |
|
1133 | return | |
1134 |
|
1134 | |||
1135 | # Should have verified this in push(). |
|
1135 | # Should have verified this in push(). | |
1136 | assert pushop.remote.capable('unbundle') |
|
1136 | assert pushop.remote.capable('unbundle') | |
1137 |
|
1137 | |||
1138 | pushop.repo.prepushoutgoinghooks(pushop) |
|
1138 | pushop.repo.prepushoutgoinghooks(pushop) | |
1139 | outgoing = pushop.outgoing |
|
1139 | outgoing = pushop.outgoing | |
1140 | # TODO: get bundlecaps from remote |
|
1140 | # TODO: get bundlecaps from remote | |
1141 | bundlecaps = None |
|
1141 | bundlecaps = None | |
1142 | # create a changegroup from local |
|
1142 | # create a changegroup from local | |
1143 | if pushop.revs is None and not (outgoing.excluded |
|
1143 | if pushop.revs is None and not (outgoing.excluded | |
1144 | or pushop.repo.changelog.filteredrevs): |
|
1144 | or pushop.repo.changelog.filteredrevs): | |
1145 | # push everything, |
|
1145 | # push everything, | |
1146 | # use the fast path, no race possible on push |
|
1146 | # use the fast path, no race possible on push | |
1147 | cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push', |
|
1147 | cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push', | |
1148 | fastpath=True, bundlecaps=bundlecaps) |
|
1148 | fastpath=True, bundlecaps=bundlecaps) | |
1149 | else: |
|
1149 | else: | |
1150 | cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', |
|
1150 | cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', | |
1151 | 'push', bundlecaps=bundlecaps) |
|
1151 | 'push', bundlecaps=bundlecaps) | |
1152 |
|
1152 | |||
1153 | # apply changegroup to remote |
|
1153 | # apply changegroup to remote | |
1154 | # local repo finds heads on server, finds out what |
|
1154 | # local repo finds heads on server, finds out what | |
1155 | # revs it must push. once revs transferred, if server |
|
1155 | # revs it must push. once revs transferred, if server | |
1156 | # finds it has different heads (someone else won |
|
1156 | # finds it has different heads (someone else won | |
1157 | # commit/push race), server aborts. |
|
1157 | # commit/push race), server aborts. | |
1158 | if pushop.force: |
|
1158 | if pushop.force: | |
1159 | remoteheads = ['force'] |
|
1159 | remoteheads = ['force'] | |
1160 | else: |
|
1160 | else: | |
1161 | remoteheads = pushop.remoteheads |
|
1161 | remoteheads = pushop.remoteheads | |
1162 | # ssh: return remote's addchangegroup() |
|
1162 | # ssh: return remote's addchangegroup() | |
1163 | # http: return remote's addchangegroup() or 0 for error |
|
1163 | # http: return remote's addchangegroup() or 0 for error | |
1164 | pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, |
|
1164 | pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, | |
1165 | pushop.repo.url()) |
|
1165 | pushop.repo.url()) | |
1166 |
|
1166 | |||
1167 | def _pushsyncphase(pushop): |
|
1167 | def _pushsyncphase(pushop): | |
1168 | """synchronise phase information locally and remotely""" |
|
1168 | """synchronise phase information locally and remotely""" | |
1169 | cheads = pushop.commonheads |
|
1169 | cheads = pushop.commonheads | |
1170 | # even when we don't push, exchanging phase data is useful |
|
1170 | # even when we don't push, exchanging phase data is useful | |
1171 | remotephases = pushop.remote.listkeys('phases') |
|
1171 | remotephases = pushop.remote.listkeys('phases') | |
1172 | if (pushop.ui.configbool('ui', '_usedassubrepo') |
|
1172 | if (pushop.ui.configbool('ui', '_usedassubrepo') | |
1173 | and remotephases # server supports phases |
|
1173 | and remotephases # server supports phases | |
1174 | and pushop.cgresult is None # nothing was pushed |
|
1174 | and pushop.cgresult is None # nothing was pushed | |
1175 | and remotephases.get('publishing', False)): |
|
1175 | and remotephases.get('publishing', False)): | |
1176 | # When: |
|
1176 | # When: | |
1177 | # - this is a subrepo push |
|
1177 | # - this is a subrepo push | |
1178 | # - and the remote supports phases |
|
1178 | # - and the remote supports phases | |
1179 | # - and no changeset was pushed |
|
1179 | # - and no changeset was pushed | |
1180 | # - and remote is publishing |
|
1180 | # - and remote is publishing | |
1181 | # We may be in the issue 3871 case! |
|
1181 | # We may be in the issue 3871 case! | |
1182 | # We drop the courtesy phase synchronisation that |
|
1182 | # We drop the courtesy phase synchronisation that | |
1183 | # would publish changesets which are possibly still |
|
1183 | # would publish changesets which are possibly still | |
1184 | # draft locally on the remote. |
|
1184 | # draft locally on the remote. | |
1185 | remotephases = {'publishing': 'True'} |
|
1185 | remotephases = {'publishing': 'True'} | |
1186 | if not remotephases: # old server, or public-only reply from non-publishing |
|
1186 | if not remotephases: # old server, or public-only reply from non-publishing | |
1187 | _localphasemove(pushop, cheads) |
|
1187 | _localphasemove(pushop, cheads) | |
1188 | # don't push any phase data as there is nothing to push |
|
1188 | # don't push any phase data as there is nothing to push | |
1189 | else: |
|
1189 | else: | |
1190 | ana = phases.analyzeremotephases(pushop.repo, cheads, |
|
1190 | ana = phases.analyzeremotephases(pushop.repo, cheads, | |
1191 | remotephases) |
|
1191 | remotephases) | |
1192 | pheads, droots = ana |
|
1192 | pheads, droots = ana | |
1193 | ### Apply remote phase on local |
|
1193 | ### Apply remote phase on local | |
1194 | if remotephases.get('publishing', False): |
|
1194 | if remotephases.get('publishing', False): | |
1195 | _localphasemove(pushop, cheads) |
|
1195 | _localphasemove(pushop, cheads) | |
1196 | else: # publish = False |
|
1196 | else: # publish = False | |
1197 | _localphasemove(pushop, pheads) |
|
1197 | _localphasemove(pushop, pheads) | |
1198 | _localphasemove(pushop, cheads, phases.draft) |
|
1198 | _localphasemove(pushop, cheads, phases.draft) | |
1199 | ### Apply local phase on remote |
|
1199 | ### Apply local phase on remote | |
1200 |
|
1200 | |||
1201 | if pushop.cgresult: |
|
1201 | if pushop.cgresult: | |
1202 | if 'phases' in pushop.stepsdone: |
|
1202 | if 'phases' in pushop.stepsdone: | |
1203 | # phases already pushed through bundle2 |
|
1203 | # phases already pushed through bundle2 | |
1204 | return |
|
1204 | return | |
1205 | outdated = pushop.outdatedphases |
|
1205 | outdated = pushop.outdatedphases | |
1206 | else: |
|
1206 | else: | |
1207 | outdated = pushop.fallbackoutdatedphases |
|
1207 | outdated = pushop.fallbackoutdatedphases | |
1208 |
|
1208 | |||
1209 | pushop.stepsdone.add('phases') |
|
1209 | pushop.stepsdone.add('phases') | |
1210 |
|
1210 | |||
1211 | # filter heads already turned public by the push |
|
1211 | # filter heads already turned public by the push | |
1212 | outdated = [c for c in outdated if c.node() not in pheads] |
|
1212 | outdated = [c for c in outdated if c.node() not in pheads] | |
1213 | # fallback to independent pushkey command |
|
1213 | # fallback to independent pushkey command | |
1214 | for newremotehead in outdated: |
|
1214 | for newremotehead in outdated: | |
1215 | r = pushop.remote.pushkey('phases', |
|
1215 | with pushop.remote.commandexecutor() as e: | |
1216 | newremotehead.hex(), |
|
1216 | r = e.callcommand('pushkey', { | |
1217 | ('%d' % phases.draft), |
|
1217 | 'namespace': 'phases', | |
1218 | ('%d' % phases.public)) |
|
1218 | 'key': newremotehead.hex(), | |
|
1219 | 'old': '%d' % phases.draft, | |
|
1220 | 'new': '%d' % phases.public | |
|
1221 | }).result() | |
|
1222 | | |
1219 | if not r: |
|
1223 | if not r: | |
1220 | pushop.ui.warn(_('updating %s to public failed!\n') |
|
1224 | pushop.ui.warn(_('updating %s to public failed!\n') | |
1221 | % newremotehead) |
|
1225 | % newremotehead) | |
1222 |
|
1226 | |||
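The hunk above swaps the positional `remote.pushkey(...)` wire call for the new command-executor API, whose `callcommand()` returns a future so several commands can be issued before waiting on results. A minimal sketch of driving the same `pushkey` call by hand; the `ui` setup, remote URL, and key are illustrative assumptions, not part of this change:

    from mercurial import hg, ui as uimod

    myui = uimod.ui.load()                      # a ui with user configuration
    peer = hg.peer(myui, {}, 'ssh://example.com/repo')  # hypothetical remote

    with peer.commandexecutor() as e:
        # callcommand() returns a future; result() waits for the reply
        ok = e.callcommand('pushkey', {
            'namespace': 'phases',
            'key': '0123456789abcdef0123456789abcdef01234567',  # dummy hex node
            'old': '1',   # phases.draft
            'new': '0',   # phases.public
        }).result()

With a single command this is equivalent to the old call; the gain is that batch-capable transports can coalesce several `callcommand()` requests into one round trip.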
1223 | def _localphasemove(pushop, nodes, phase=phases.public): |
|
1227 | def _localphasemove(pushop, nodes, phase=phases.public): | |
1224 | """move <nodes> to <phase> in the local source repo""" |
|
1228 | """move <nodes> to <phase> in the local source repo""" | |
1225 | if pushop.trmanager: |
|
1229 | if pushop.trmanager: | |
1226 | phases.advanceboundary(pushop.repo, |
|
1230 | phases.advanceboundary(pushop.repo, | |
1227 | pushop.trmanager.transaction(), |
|
1231 | pushop.trmanager.transaction(), | |
1228 | phase, |
|
1232 | phase, | |
1229 | nodes) |
|
1233 | nodes) | |
1230 | else: |
|
1234 | else: | |
1231 | # repo is not locked, do not change any phases! |
|
1235 | # repo is not locked, do not change any phases! | |
1232 | # Inform the user that phases should have been moved when |
|
1236 | # Inform the user that phases should have been moved when | |
1233 | # applicable. |
|
1237 | # applicable. | |
1234 | actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()] |
|
1238 | actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()] | |
1235 | phasestr = phases.phasenames[phase] |
|
1239 | phasestr = phases.phasenames[phase] | |
1236 | if actualmoves: |
|
1240 | if actualmoves: | |
1237 | pushop.ui.status(_('cannot lock source repo, skipping ' |
|
1241 | pushop.ui.status(_('cannot lock source repo, skipping ' | |
1238 | 'local %s phase update\n') % phasestr) |
|
1242 | 'local %s phase update\n') % phasestr) | |
1239 |
|
1243 | |||
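`_localphasemove` only warns when a move would actually have happened: phases are ordered integers (public=0 < draft=1 < secret=2), so the `phase < pushop.repo[n].phase()` test keeps exactly the nodes that would drop to a lower, more public phase. A self-contained sketch of that filter, with a made-up `nodephase` table standing in for the repository:

    public, draft, secret = 0, 1, 2   # mirrors phases.public/draft/secret
    nodephase = {'n1': public, 'n2': draft, 'n3': secret}  # fake repo state

    def actualmoves(nodes, targetphase):
        # nodes whose phase would really change if advanced to targetphase
        return [n for n in nodes if targetphase < nodephase[n]]

    assert actualmoves(['n1', 'n2', 'n3'], public) == ['n2', 'n3']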
1240 | def _pushobsolete(pushop): |
|
1244 | def _pushobsolete(pushop): | |
1241 | """utility function to push obsolete markers to a remote""" |
|
1245 | """utility function to push obsolete markers to a remote""" | |
1242 | if 'obsmarkers' in pushop.stepsdone: |
|
1246 | if 'obsmarkers' in pushop.stepsdone: | |
1243 | return |
|
1247 | return | |
1244 | repo = pushop.repo |
|
1248 | repo = pushop.repo | |
1245 | remote = pushop.remote |
|
1249 | remote = pushop.remote | |
1246 | pushop.stepsdone.add('obsmarkers') |
|
1250 | pushop.stepsdone.add('obsmarkers') | |
1247 | if pushop.outobsmarkers: |
|
1251 | if pushop.outobsmarkers: | |
1248 | pushop.ui.debug('try to push obsolete markers to remote\n') |
|
1252 | pushop.ui.debug('try to push obsolete markers to remote\n') | |
1249 | rslts = [] |
|
1253 | rslts = [] | |
1250 | remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers)) |
|
1254 | remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers)) | |
1251 | for key in sorted(remotedata, reverse=True): |
|
1255 | for key in sorted(remotedata, reverse=True): | |
1252 | # reverse sort to ensure we end with dump0 |
|
1256 | # reverse sort to ensure we end with dump0 | |
1253 | data = remotedata[key] |
|
1257 | data = remotedata[key] | |
1254 | rslts.append(remote.pushkey('obsolete', key, '', data)) |
|
1258 | rslts.append(remote.pushkey('obsolete', key, '', data)) | |
1255 | if [r for r in rslts if not r]: |
|
1259 | if [r for r in rslts if not r]: | |
1256 | msg = _('failed to push some obsolete markers!\n') |
|
1260 | msg = _('failed to push some obsolete markers!\n') | |
1257 | repo.ui.warn(msg) |
|
1261 | repo.ui.warn(msg) | |
1258 |
|
1262 | |||
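`obsolete._pushkeyescape` splits the encoded markers across keys named `dump0`, `dump1`, ... because one pushkey value has a size limit; pushing in reverse-sorted key order makes `dump0` land last (presumably so a partially completed push never leaves `dump0` present without its higher-numbered companions). The ordering itself is a plain string sort:

    remotedata = {'dump0': 'chunk-a', 'dump1': 'chunk-b', 'dump2': 'chunk-c'}

    order = sorted(remotedata, reverse=True)      # iterate keys, highest first
    assert order == ['dump2', 'dump1', 'dump0']   # dump0 is pushed last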
1259 | def _pushbookmark(pushop): |
|
1263 | def _pushbookmark(pushop): | |
1260 | """Update bookmark position on remote""" |
|
1264 | """Update bookmark position on remote""" | |
1261 | if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone: |
|
1265 | if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone: | |
1262 | return |
|
1266 | return | |
1263 | pushop.stepsdone.add('bookmarks') |
|
1267 | pushop.stepsdone.add('bookmarks') | |
1264 | ui = pushop.ui |
|
1268 | ui = pushop.ui | |
1265 | remote = pushop.remote |
|
1269 | remote = pushop.remote | |
1266 |
|
1270 | |||
1267 | for b, old, new in pushop.outbookmarks: |
|
1271 | for b, old, new in pushop.outbookmarks: | |
1268 | action = 'update' |
|
1272 | action = 'update' | |
1269 | if not old: |
|
1273 | if not old: | |
1270 | action = 'export' |
|
1274 | action = 'export' | |
1271 | elif not new: |
|
1275 | elif not new: | |
1272 | action = 'delete' |
|
1276 | action = 'delete' | |
1273 | if remote.pushkey('bookmarks', b, old, new): |
|
1277 | ||
|
1278 | with remote.commandexecutor() as e: | |||
|
1279 | r = e.callcommand('pushkey', { | |||
|
1280 | 'namespace': 'bookmarks', | |||
|
1281 | 'key': b, | |||
|
1282 | 'old': old, | |||
|
1283 | 'new': new, | |||
|
1284 | }).result() | |||
|
1285 | ||||
|
1286 | if r: | |||
1274 | ui.status(bookmsgmap[action][0] % b) |
|
1287 | ui.status(bookmsgmap[action][0] % b) | |
1275 | else: |
|
1288 | else: | |
1276 | ui.warn(bookmsgmap[action][1] % b) |
|
1289 | ui.warn(bookmsgmap[action][1] % b) | |
1277 | # discovery can have set the value from an invalid entry |
|
1290 | # discovery can have set the value from an invalid entry | |
1278 | if pushop.bkresult is not None: |
|
1291 | if pushop.bkresult is not None: | |
1279 | pushop.bkresult = 1 |
|
1292 | pushop.bkresult = 1 | |
1280 |
|
1293 | |||
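The `action` choice above follows the pushkey convention for the `bookmarks` namespace: an empty `old` means the bookmark does not yet exist remotely, and an empty `new` requests deletion. Restated as a tiny standalone function (the function name is invented for illustration):

    def bookmarkaction(old, new):
        # empty old -> bookmark is new on the remote; empty new -> delete it
        if not old:
            return 'export'
        if not new:
            return 'delete'
        return 'update'

    assert bookmarkaction('', 'abc123') == 'export'
    assert bookmarkaction('abc123', '') == 'delete'
    assert bookmarkaction('abc123', 'def456') == 'update'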
1281 | class pulloperation(object): |
|
1294 | class pulloperation(object): | |
1282 | """A object that represent a single pull operation |
|
1295 | """A object that represent a single pull operation | |
1283 |
|
1296 | |||
1284 | Its purpose is to carry pull-related state and very common operations. |
|
1297 | Its purpose is to carry pull-related state and very common operations. | |
1285 |
|
1298 | |||
1286 | A new one should be created at the beginning of each pull and discarded |
|
1299 | A new one should be created at the beginning of each pull and discarded | |
1287 | afterward. |
|
1300 | afterward. | |
1288 | """ |
|
1301 | """ | |
1289 |
|
1302 | |||
1290 | def __init__(self, repo, remote, heads=None, force=False, bookmarks=(), |
|
1303 | def __init__(self, repo, remote, heads=None, force=False, bookmarks=(), | |
1291 | remotebookmarks=None, streamclonerequested=None): |
|
1304 | remotebookmarks=None, streamclonerequested=None): | |
1292 | # repo we pull into |
|
1305 | # repo we pull into | |
1293 | self.repo = repo |
|
1306 | self.repo = repo | |
1294 | # repo we pull from |
|
1307 | # repo we pull from | |
1295 | self.remote = remote |
|
1308 | self.remote = remote | |
1296 | # revisions we try to pull (None is "all") |
|
1309 | # revisions we try to pull (None is "all") | |
1297 | self.heads = heads |
|
1310 | self.heads = heads | |
1298 | # bookmarks pulled explicitly |
|
1311 | # bookmarks pulled explicitly | |
1299 | self.explicitbookmarks = [repo._bookmarks.expandname(bookmark) |
|
1312 | self.explicitbookmarks = [repo._bookmarks.expandname(bookmark) | |
1300 | for bookmark in bookmarks] |
|
1313 | for bookmark in bookmarks] | |
1301 | # do we force pull? |
|
1314 | # do we force pull? | |
1302 | self.force = force |
|
1315 | self.force = force | |
1303 | # whether a streaming clone was requested |
|
1316 | # whether a streaming clone was requested | |
1304 | self.streamclonerequested = streamclonerequested |
|
1317 | self.streamclonerequested = streamclonerequested | |
1305 | # transaction manager |
|
1318 | # transaction manager | |
1306 | self.trmanager = None |
|
1319 | self.trmanager = None | |
1307 | # set of common changesets between local and remote before pull |
|
1320 | # set of common changesets between local and remote before pull | |
1308 | self.common = None |
|
1321 | self.common = None | |
1309 | # set of pulled heads |
|
1322 | # set of pulled heads | |
1310 | self.rheads = None |
|
1323 | self.rheads = None | |
1311 | # list of missing changesets to fetch remotely |
|
1324 | # list of missing changesets to fetch remotely | |
1312 | self.fetch = None |
|
1325 | self.fetch = None | |
1313 | # remote bookmarks data |
|
1326 | # remote bookmarks data | |
1314 | self.remotebookmarks = remotebookmarks |
|
1327 | self.remotebookmarks = remotebookmarks | |
1315 | # result of changegroup pulling (used as return code by pull) |
|
1328 | # result of changegroup pulling (used as return code by pull) | |
1316 | self.cgresult = None |
|
1329 | self.cgresult = None | |
1317 | # list of steps already done |
|
1330 | # list of steps already done | |
1318 | self.stepsdone = set() |
|
1331 | self.stepsdone = set() | |
1319 | # Whether we attempted a clone from pre-generated bundles. |
|
1332 | # Whether we attempted a clone from pre-generated bundles. | |
1320 | self.clonebundleattempted = False |
|
1333 | self.clonebundleattempted = False | |
1321 |
|
1334 | |||
1322 | @util.propertycache |
|
1335 | @util.propertycache | |
1323 | def pulledsubset(self): |
|
1336 | def pulledsubset(self): | |
1324 | """heads of the set of changeset target by the pull""" |
|
1337 | """heads of the set of changeset target by the pull""" | |
1325 | # compute target subset |
|
1338 | # compute target subset | |
1326 | if self.heads is None: |
|
1339 | if self.heads is None: | |
1327 | # We pulled everything possible |
|
1340 | # We pulled everything possible | |
1328 | # sync on everything common |
|
1341 | # sync on everything common | |
1329 | c = set(self.common) |
|
1342 | c = set(self.common) | |
1330 | ret = list(self.common) |
|
1343 | ret = list(self.common) | |
1331 | for n in self.rheads: |
|
1344 | for n in self.rheads: | |
1332 | if n not in c: |
|
1345 | if n not in c: | |
1333 | ret.append(n) |
|
1346 | ret.append(n) | |
1334 | return ret |
|
1347 | return ret | |
1335 | else: |
|
1348 | else: | |
1336 | # We pulled a specific subset |
|
1349 | # We pulled a specific subset | |
1337 | # sync on this subset |
|
1350 | # sync on this subset | |
1338 | return self.heads |
|
1351 | return self.heads | |
1339 |
|
1352 | |||
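When no heads were requested, `pulledsubset` is effectively `common ∪ rheads` with order preserved and duplicates skipped; with explicit heads, the sync target is simply those heads. The union logic, reduced to placeholder node strings:

    common = ['a', 'b']
    rheads = ['b', 'c']

    seen = set(common)
    subset = list(common)
    for n in rheads:              # append remote heads not already common
        if n not in seen:
            subset.append(n)

    assert subset == ['a', 'b', 'c']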
1340 | @util.propertycache |
|
1353 | @util.propertycache | |
1341 | def canusebundle2(self): |
|
1354 | def canusebundle2(self): | |
1342 | return not _forcebundle1(self) |
|
1355 | return not _forcebundle1(self) | |
1343 |
|
1356 | |||
1344 | @util.propertycache |
|
1357 | @util.propertycache | |
1345 | def remotebundle2caps(self): |
|
1358 | def remotebundle2caps(self): | |
1346 | return bundle2.bundle2caps(self.remote) |
|
1359 | return bundle2.bundle2caps(self.remote) | |
1347 |
|
1360 | |||
1348 | def gettransaction(self): |
|
1361 | def gettransaction(self): | |
1349 | # deprecated; talk to trmanager directly |
|
1362 | # deprecated; talk to trmanager directly | |
1350 | return self.trmanager.transaction() |
|
1363 | return self.trmanager.transaction() | |
1351 |
|
1364 | |||
1352 | class transactionmanager(util.transactional): |
|
1365 | class transactionmanager(util.transactional): | |
1353 | """An object to manage the life cycle of a transaction |
|
1366 | """An object to manage the life cycle of a transaction | |
1354 |
|
1367 | |||
1355 | It creates the transaction on demand and calls the appropriate hooks when |
|
1368 | It creates the transaction on demand and calls the appropriate hooks when | |
1356 | closing the transaction.""" |
|
1369 | closing the transaction.""" | |
1357 | def __init__(self, repo, source, url): |
|
1370 | def __init__(self, repo, source, url): | |
1358 | self.repo = repo |
|
1371 | self.repo = repo | |
1359 | self.source = source |
|
1372 | self.source = source | |
1360 | self.url = url |
|
1373 | self.url = url | |
1361 | self._tr = None |
|
1374 | self._tr = None | |
1362 |
|
1375 | |||
1363 | def transaction(self): |
|
1376 | def transaction(self): | |
1364 | """Return an open transaction object, constructing if necessary""" |
|
1377 | """Return an open transaction object, constructing if necessary""" | |
1365 | if not self._tr: |
|
1378 | if not self._tr: | |
1366 | trname = '%s\n%s' % (self.source, util.hidepassword(self.url)) |
|
1379 | trname = '%s\n%s' % (self.source, util.hidepassword(self.url)) | |
1367 | self._tr = self.repo.transaction(trname) |
|
1380 | self._tr = self.repo.transaction(trname) | |
1368 | self._tr.hookargs['source'] = self.source |
|
1381 | self._tr.hookargs['source'] = self.source | |
1369 | self._tr.hookargs['url'] = self.url |
|
1382 | self._tr.hookargs['url'] = self.url | |
1370 | return self._tr |
|
1383 | return self._tr | |
1371 |
|
1384 | |||
1372 | def close(self): |
|
1385 | def close(self): | |
1373 | """close transaction if created""" |
|
1386 | """close transaction if created""" | |
1374 | if self._tr is not None: |
|
1387 | if self._tr is not None: | |
1375 | self._tr.close() |
|
1388 | self._tr.close() | |
1376 |
|
1389 | |||
1377 | def release(self): |
|
1390 | def release(self): | |
1378 | """release transaction if created""" |
|
1391 | """release transaction if created""" | |
1379 | if self._tr is not None: |
|
1392 | if self._tr is not None: | |
1380 | self._tr.release() |
|
1393 | self._tr.release() | |
1381 |
|
1394 | |||
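Because it subclasses `util.transactional`, a `transactionmanager` works in a `with` statement: `close()` runs on a clean exit and `release()` always runs, rolling back any transaction that was never closed. The `with repo.wlock(), repo.lock(), pullop.trmanager:` line in `pull()` below relies on exactly this. Unrolled by hand, assuming `repo` is an open local repository:

    tm = transactionmanager(repo, 'pull', 'ssh://example.com/repo')
    try:
        tr = tm.transaction()   # created lazily, hookargs pre-populated
        # ... apply changegroups, phases, bookmarks under tr ...
        tm.close()              # success path: fires transaction hooks
    finally:
        tm.release()            # rolls back if close() never ran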
1382 | def _fullpullbundle2(repo, pullop): |
|
1395 | def _fullpullbundle2(repo, pullop): | |
1383 | # The server may send a partial reply, i.e. when inlining |
|
1396 | # The server may send a partial reply, i.e. when inlining | |
1384 | # pre-computed bundles. In that case, update the common |
|
1397 | # pre-computed bundles. In that case, update the common | |
1385 | # set based on the results and pull another bundle. |
|
1398 | # set based on the results and pull another bundle. | |
1386 | # |
|
1399 | # | |
1387 | # There are two indicators that the process is finished: |
|
1400 | # There are two indicators that the process is finished: | |
1388 | # - no changeset has been added, or |
|
1401 | # - no changeset has been added, or | |
1389 | # - all remote heads are known locally. |
|
1402 | # - all remote heads are known locally. | |
1390 | # The head check must use the unfiltered view as obsoletion |
|
1403 | # The head check must use the unfiltered view as obsoletion | |
1391 | # markers can hide heads. |
|
1404 | # markers can hide heads. | |
1392 | unfi = repo.unfiltered() |
|
1405 | unfi = repo.unfiltered() | |
1393 | unficl = unfi.changelog |
|
1406 | unficl = unfi.changelog | |
1394 | def headsofdiff(h1, h2): |
|
1407 | def headsofdiff(h1, h2): | |
1395 | """Returns heads(h1 % h2)""" |
|
1408 | """Returns heads(h1 % h2)""" | |
1396 | res = unfi.set('heads(%ln %% %ln)', h1, h2) |
|
1409 | res = unfi.set('heads(%ln %% %ln)', h1, h2) | |
1397 | return set(ctx.node() for ctx in res) |
|
1410 | return set(ctx.node() for ctx in res) | |
1398 | def headsofunion(h1, h2): |
|
1411 | def headsofunion(h1, h2): | |
1399 | """Returns heads((h1 + h2) - null)""" |
|
1412 | """Returns heads((h1 + h2) - null)""" | |
1400 | res = unfi.set('heads((%ln + %ln - null))', h1, h2) |
|
1413 | res = unfi.set('heads((%ln + %ln - null))', h1, h2) | |
1401 | return set(ctx.node() for ctx in res) |
|
1414 | return set(ctx.node() for ctx in res) | |
1402 | while True: |
|
1415 | while True: | |
1403 | old_heads = unficl.heads() |
|
1416 | old_heads = unficl.heads() | |
1404 | clstart = len(unficl) |
|
1417 | clstart = len(unficl) | |
1405 | _pullbundle2(pullop) |
|
1418 | _pullbundle2(pullop) | |
1406 | if changegroup.NARROW_REQUIREMENT in repo.requirements: |
|
1419 | if changegroup.NARROW_REQUIREMENT in repo.requirements: | |
1407 | # XXX narrow clones filter the heads on the server side during |
|
1420 | # XXX narrow clones filter the heads on the server side during | |
1408 | # XXX getbundle and result in partial replies as well. |
|
1421 | # XXX getbundle and result in partial replies as well. | |
1409 | # XXX Disable pull bundles in this case as band aid to avoid |
|
1422 | # XXX Disable pull bundles in this case as band aid to avoid | |
1410 | # XXX extra round trips. |
|
1423 | # XXX extra round trips. | |
1411 | break |
|
1424 | break | |
1412 | if clstart == len(unficl): |
|
1425 | if clstart == len(unficl): | |
1413 | break |
|
1426 | break | |
1414 | if all(unficl.hasnode(n) for n in pullop.rheads): |
|
1427 | if all(unficl.hasnode(n) for n in pullop.rheads): | |
1415 | break |
|
1428 | break | |
1416 | new_heads = headsofdiff(unficl.heads(), old_heads) |
|
1429 | new_heads = headsofdiff(unficl.heads(), old_heads) | |
1417 | pullop.common = headsofunion(new_heads, pullop.common) |
|
1430 | pullop.common = headsofunion(new_heads, pullop.common) | |
1418 | pullop.rheads = set(pullop.rheads) - pullop.common |
|
1431 | pullop.rheads = set(pullop.rheads) - pullop.common | |
1419 |
|
1432 | |||
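A note on `headsofdiff`: `repo.set()` performs revset templating, where `%ln` injects a list of binary nodes and `%%` escapes a literal `%`, the revset "only" operator, so the query evaluates `heads(h1 % h2)`: topological heads among changesets reachable from `h1` but not from `h2`. The call shape in isolation (assuming `repo` is a local repository and `h1`/`h2` are lists of binary nodes it contains):

    res = repo.set('heads(%ln %% %ln)', h1, h2)
    newheads = set(ctx.node() for ctx in res)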
1420 | def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None, |
|
1433 | def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None, | |
1421 | streamclonerequested=None): |
|
1434 | streamclonerequested=None): | |
1422 | """Fetch repository data from a remote. |
|
1435 | """Fetch repository data from a remote. | |
1423 |
|
1436 | |||
1424 | This is the main function used to retrieve data from a remote repository. |
|
1437 | This is the main function used to retrieve data from a remote repository. | |
1425 |
|
1438 | |||
1426 | ``repo`` is the local repository to clone into. |
|
1439 | ``repo`` is the local repository to clone into. | |
1427 | ``remote`` is a peer instance. |
|
1440 | ``remote`` is a peer instance. | |
1428 | ``heads`` is an iterable of revisions we want to pull. ``None`` (the |
|
1441 | ``heads`` is an iterable of revisions we want to pull. ``None`` (the | |
1429 | default) means to pull everything from the remote. |
|
1442 | default) means to pull everything from the remote. | |
1430 | ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By |
|
1443 | ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By | |
1431 | default, all remote bookmarks are pulled. |
|
1444 | default, all remote bookmarks are pulled. | |
1432 | ``opargs`` are additional keyword arguments to pass to ``pulloperation`` |
|
1445 | ``opargs`` are additional keyword arguments to pass to ``pulloperation`` | |
1433 | initialization. |
|
1446 | initialization. | |
1434 | ``streamclonerequested`` is a boolean indicating whether a "streaming |
|
1447 | ``streamclonerequested`` is a boolean indicating whether a "streaming | |
1435 | clone" is requested. A "streaming clone" is essentially a raw file copy |
|
1448 | clone" is requested. A "streaming clone" is essentially a raw file copy | |
1436 | of revlogs from the server. This only works when the local repository is |
|
1449 | of revlogs from the server. This only works when the local repository is | |
1437 | empty. The default value of ``None`` means to respect the server |
|
1450 | empty. The default value of ``None`` means to respect the server | |
1438 | configuration for preferring stream clones. |
|
1451 | configuration for preferring stream clones. | |
1439 |
|
1452 | |||
1440 | Returns the ``pulloperation`` created for this pull. |
|
1453 | Returns the ``pulloperation`` created for this pull. | |
1441 | """ |
|
1454 | """ | |
1442 | if opargs is None: |
|
1455 | if opargs is None: | |
1443 | opargs = {} |
|
1456 | opargs = {} | |
1444 | pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks, |
|
1457 | pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks, | |
1445 | streamclonerequested=streamclonerequested, |
|
1458 | streamclonerequested=streamclonerequested, | |
1446 | **pycompat.strkwargs(opargs)) |
|
1459 | **pycompat.strkwargs(opargs)) | |
1447 |
|
1460 | |||
1448 | peerlocal = pullop.remote.local() |
|
1461 | peerlocal = pullop.remote.local() | |
1449 | if peerlocal: |
|
1462 | if peerlocal: | |
1450 | missing = set(peerlocal.requirements) - pullop.repo.supported |
|
1463 | missing = set(peerlocal.requirements) - pullop.repo.supported | |
1451 | if missing: |
|
1464 | if missing: | |
1452 | msg = _("required features are not" |
|
1465 | msg = _("required features are not" | |
1453 | " supported in the destination:" |
|
1466 | " supported in the destination:" | |
1454 | " %s") % (', '.join(sorted(missing))) |
|
1467 | " %s") % (', '.join(sorted(missing))) | |
1455 | raise error.Abort(msg) |
|
1468 | raise error.Abort(msg) | |
1456 |
|
1469 | |||
1457 | pullop.trmanager = transactionmanager(repo, 'pull', remote.url()) |
|
1470 | pullop.trmanager = transactionmanager(repo, 'pull', remote.url()) | |
1458 | with repo.wlock(), repo.lock(), pullop.trmanager: |
|
1471 | with repo.wlock(), repo.lock(), pullop.trmanager: | |
1459 | # This should ideally be in _pullbundle2(). However, it needs to run |
|
1472 | # This should ideally be in _pullbundle2(). However, it needs to run | |
1460 | # before discovery to avoid extra work. |
|
1473 | # before discovery to avoid extra work. | |
1461 | _maybeapplyclonebundle(pullop) |
|
1474 | _maybeapplyclonebundle(pullop) | |
1462 | streamclone.maybeperformlegacystreamclone(pullop) |
|
1475 | streamclone.maybeperformlegacystreamclone(pullop) | |
1463 | _pulldiscovery(pullop) |
|
1476 | _pulldiscovery(pullop) | |
1464 | if pullop.canusebundle2: |
|
1477 | if pullop.canusebundle2: | |
1465 | _fullpullbundle2(repo, pullop) |
|
1478 | _fullpullbundle2(repo, pullop) | |
1466 | _pullchangeset(pullop) |
|
1479 | _pullchangeset(pullop) | |
1467 | _pullphase(pullop) |
|
1480 | _pullphase(pullop) | |
1468 | _pullbookmarks(pullop) |
|
1481 | _pullbookmarks(pullop) | |
1469 | _pullobsolete(pullop) |
|
1482 | _pullobsolete(pullop) | |
1470 |
|
1483 | |||
1471 | # storing remotenames |
|
1484 | # storing remotenames | |
1472 | if repo.ui.configbool('experimental', 'remotenames'): |
|
1485 | if repo.ui.configbool('experimental', 'remotenames'): | |
1473 | logexchange.pullremotenames(repo, remote) |
|
1486 | logexchange.pullremotenames(repo, remote) | |
1474 |
|
1487 | |||
1475 | return pullop |
|
1488 | return pullop | |
1476 |
|
1489 | |||
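A minimal caller of `pull()`, for orientation; the paths are placeholders and error handling is elided:

    from mercurial import exchange, hg, ui as uimod

    myui = uimod.ui.load()
    repo = hg.repository(myui, '/path/to/local/repo')       # assumed to exist
    remote = hg.peer(myui, {}, 'https://example.com/repo')  # assumed reachable

    pullop = exchange.pull(repo, remote)   # heads=None: pull everything
    if pullop.cgresult == 0:
        myui.status('no changes found\n')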
1477 | # list of steps to perform discovery before pull |
|
1490 | # list of steps to perform discovery before pull | |
1478 | pulldiscoveryorder = [] |
|
1491 | pulldiscoveryorder = [] | |
1479 |
|
1492 | |||
1480 | # Mapping between step name and function |
|
1493 | # Mapping between step name and function | |
1481 | # |
|
1494 | # | |
1482 | # This exists to help extensions wrap steps if necessary |
|
1495 | # This exists to help extensions wrap steps if necessary | |
1483 | pulldiscoverymapping = {} |
|
1496 | pulldiscoverymapping = {} | |
1484 |
|
1497 | |||
1485 | def pulldiscovery(stepname): |
|
1498 | def pulldiscovery(stepname): | |
1486 | """decorator for function performing discovery before pull |
|
1499 | """decorator for function performing discovery before pull | |
1487 |
|
1500 | |||
1488 | The function is added to the step -> function mapping and appended to the |
|
1501 | The function is added to the step -> function mapping and appended to the | |
1489 | list of steps. Beware that decorated functions will be added in order (this |
|
1502 | list of steps. Beware that decorated functions will be added in order (this | |
1490 | may matter). |
|
1503 | may matter). | |
1491 |
|
1504 | |||
1492 | You can only use this decorator for a new step; if you want to wrap a step |
|
1505 | You can only use this decorator for a new step; if you want to wrap a step | |
1493 | from an extension, change the pulldiscovery dictionary directly.""" |
|
1506 | from an extension, change the pulldiscovery dictionary directly.""" | |
1494 | def dec(func): |
|
1507 | def dec(func): | |
1495 | assert stepname not in pulldiscoverymapping |
|
1508 | assert stepname not in pulldiscoverymapping | |
1496 | pulldiscoverymapping[stepname] = func |
|
1509 | pulldiscoverymapping[stepname] = func | |
1497 | pulldiscoveryorder.append(stepname) |
|
1510 | pulldiscoveryorder.append(stepname) | |
1498 | return func |
|
1511 | return func | |
1499 | return dec |
|
1512 | return dec | |
1500 |
|
1513 | |||
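An extension registers its own discovery step by decorating a function that takes the `pulloperation`; steps run in registration order. A hypothetical example (step name and body are invented for illustration):

    @pulldiscovery('my-extension-step')
    def _pulldiscoverymystep(pullop):
        # runs once per pull, after the steps registered before it
        pullop.repo.ui.debug('running custom discovery\n')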
1501 | def _pulldiscovery(pullop): |
|
1514 | def _pulldiscovery(pullop): | |
1502 | """Run all discovery steps""" |
|
1515 | """Run all discovery steps""" | |
1503 | for stepname in pulldiscoveryorder: |
|
1516 | for stepname in pulldiscoveryorder: | |
1504 | step = pulldiscoverymapping[stepname] |
|
1517 | step = pulldiscoverymapping[stepname] | |
1505 | step(pullop) |
|
1518 | step(pullop) | |
1506 |
|
1519 | |||
1507 | @pulldiscovery('b1:bookmarks') |
|
1520 | @pulldiscovery('b1:bookmarks') | |
1508 | def _pullbookmarkbundle1(pullop): |
|
1521 | def _pullbookmarkbundle1(pullop): | |
1509 | """fetch bookmark data in bundle1 case |
|
1522 | """fetch bookmark data in bundle1 case | |
1510 |
|
1523 | |||
1511 | If not using bundle2, we have to fetch bookmarks before changeset |
|
1524 | If not using bundle2, we have to fetch bookmarks before changeset | |
1512 | discovery to reduce the chance and impact of race conditions.""" |
|
1525 | discovery to reduce the chance and impact of race conditions.""" | |
1513 | if pullop.remotebookmarks is not None: |
|
1526 | if pullop.remotebookmarks is not None: | |
1514 | return |
|
1527 | return | |
1515 | if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps: |
|
1528 | if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps: | |
1516 | # all known bundle2 servers now support listkeys, but let's be nice with |
|
1529 | # all known bundle2 servers now support listkeys, but let's be nice with | |
1517 | # new implementations. |
|
1530 | # new implementations. | |
1518 | return |
|
1531 | return | |
1519 | books = pullop.remote.listkeys('bookmarks') |
|
1532 | books = pullop.remote.listkeys('bookmarks') | |
1520 | pullop.remotebookmarks = bookmod.unhexlifybookmarks(books) |
|
1533 | pullop.remotebookmarks = bookmod.unhexlifybookmarks(books) | |
1521 |
|
1534 | |||
1522 |
|
1535 | |||
1523 | @pulldiscovery('changegroup') |
|
1536 | @pulldiscovery('changegroup') | |
1524 | def _pulldiscoverychangegroup(pullop): |
|
1537 | def _pulldiscoverychangegroup(pullop): | |
1525 | """discovery phase for the pull |
|
1538 | """discovery phase for the pull | |
1526 |
|
1539 | |||
1527 | Currently handles changeset discovery only; will change to handle all discovery |
|
1540 | Currently handles changeset discovery only; will change to handle all discovery | |
1528 | at some point.""" |
|
1541 | at some point.""" | |
1529 | tmp = discovery.findcommonincoming(pullop.repo, |
|
1542 | tmp = discovery.findcommonincoming(pullop.repo, | |
1530 | pullop.remote, |
|
1543 | pullop.remote, | |
1531 | heads=pullop.heads, |
|
1544 | heads=pullop.heads, | |
1532 | force=pullop.force) |
|
1545 | force=pullop.force) | |
1533 | common, fetch, rheads = tmp |
|
1546 | common, fetch, rheads = tmp | |
1534 | nm = pullop.repo.unfiltered().changelog.nodemap |
|
1547 | nm = pullop.repo.unfiltered().changelog.nodemap | |
1535 | if fetch and rheads: |
|
1548 | if fetch and rheads: | |
1536 | # If a remote head is filtered locally, put it back in common. |
|
1549 | # If a remote head is filtered locally, put it back in common. | |
1537 | # |
|
1550 | # | |
1538 | # This is a hackish solution to catch most of "common but locally |
|
1551 | # This is a hackish solution to catch most of "common but locally | |
1539 | # hidden situation". We do not performs discovery on unfiltered |
|
1552 | # hidden situation". We do not performs discovery on unfiltered | |
1540 | # repository because it end up doing a pathological amount of round |
|
1553 | # repository because it end up doing a pathological amount of round | |
1541 | # trip for w huge amount of changeset we do not care about. |
|
1554 | # trip for w huge amount of changeset we do not care about. | |
1542 | # |
|
1555 | # | |
1543 | # If a set of such "common but filtered" changesets exists on the server |
|
1556 | # If a set of such "common but filtered" changesets exists on the server | |
1544 | # but does not include a remote head, we'll not be able to detect it. |
|
1557 | # but does not include a remote head, we'll not be able to detect it. | |
1545 | scommon = set(common) |
|
1558 | scommon = set(common) | |
1546 | for n in rheads: |
|
1559 | for n in rheads: | |
1547 | if n in nm: |
|
1560 | if n in nm: | |
1548 | if n not in scommon: |
|
1561 | if n not in scommon: | |
1549 | common.append(n) |
|
1562 | common.append(n) | |
1550 | if set(rheads).issubset(set(common)): |
|
1563 | if set(rheads).issubset(set(common)): | |
1551 | fetch = [] |
|
1564 | fetch = [] | |
1552 | pullop.common = common |
|
1565 | pullop.common = common | |
1553 | pullop.fetch = fetch |
|
1566 | pullop.fetch = fetch | |
1554 | pullop.rheads = rheads |
|
1567 | pullop.rheads = rheads | |
1555 |
|
1568 | |||
1556 | def _pullbundle2(pullop): |
|
1569 | def _pullbundle2(pullop): | |
1557 | """pull data using bundle2 |
|
1570 | """pull data using bundle2 | |
1558 |
|
1571 | |||
1559 | For now, the only supported data are changegroup.""" |
|
1572 | For now, the only supported data are changegroup.""" | |
1560 | kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')} |
|
1573 | kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')} | |
1561 |
|
1574 | |||
1562 | # make ui easier to access |
|
1575 | # make ui easier to access | |
1563 | ui = pullop.repo.ui |
|
1576 | ui = pullop.repo.ui | |
1564 |
|
1577 | |||
1565 | # At the moment we don't do stream clones over bundle2. If that is |
|
1578 | # At the moment we don't do stream clones over bundle2. If that is | |
1566 | # implemented then here's where the check for that will go. |
|
1579 | # implemented then here's where the check for that will go. | |
1567 | streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0] |
|
1580 | streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0] | |
1568 |
|
1581 | |||
1569 | # declare the pull perimeter |
|
1582 | # declare the pull perimeter | |
1570 | kwargs['common'] = pullop.common |
|
1583 | kwargs['common'] = pullop.common | |
1571 | kwargs['heads'] = pullop.heads or pullop.rheads |
|
1584 | kwargs['heads'] = pullop.heads or pullop.rheads | |
1572 |
|
1585 | |||
1573 | if streaming: |
|
1586 | if streaming: | |
1574 | kwargs['cg'] = False |
|
1587 | kwargs['cg'] = False | |
1575 | kwargs['stream'] = True |
|
1588 | kwargs['stream'] = True | |
1576 | pullop.stepsdone.add('changegroup') |
|
1589 | pullop.stepsdone.add('changegroup') | |
1577 | pullop.stepsdone.add('phases') |
|
1590 | pullop.stepsdone.add('phases') | |
1578 |
|
1591 | |||
1579 | else: |
|
1592 | else: | |
1580 | # pulling changegroup |
|
1593 | # pulling changegroup | |
1581 | pullop.stepsdone.add('changegroup') |
|
1594 | pullop.stepsdone.add('changegroup') | |
1582 |
|
1595 | |||
1583 | kwargs['cg'] = pullop.fetch |
|
1596 | kwargs['cg'] = pullop.fetch | |
1584 |
|
1597 | |||
1585 | legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange') |
|
1598 | legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange') | |
1586 | hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ()) |
|
1599 | hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ()) | |
1587 | if (not legacyphase and hasbinaryphase): |
|
1600 | if (not legacyphase and hasbinaryphase): | |
1588 | kwargs['phases'] = True |
|
1601 | kwargs['phases'] = True | |
1589 | pullop.stepsdone.add('phases') |
|
1602 | pullop.stepsdone.add('phases') | |
1590 |
|
1603 | |||
1591 | if 'listkeys' in pullop.remotebundle2caps: |
|
1604 | if 'listkeys' in pullop.remotebundle2caps: | |
1592 | if 'phases' not in pullop.stepsdone: |
|
1605 | if 'phases' not in pullop.stepsdone: | |
1593 | kwargs['listkeys'] = ['phases'] |
|
1606 | kwargs['listkeys'] = ['phases'] | |
1594 |
|
1607 | |||
1595 | bookmarksrequested = False |
|
1608 | bookmarksrequested = False | |
1596 | legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange') |
|
1609 | legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange') | |
1597 | hasbinarybook = 'bookmarks' in pullop.remotebundle2caps |
|
1610 | hasbinarybook = 'bookmarks' in pullop.remotebundle2caps | |
1598 |
|
1611 | |||
1599 | if pullop.remotebookmarks is not None: |
|
1612 | if pullop.remotebookmarks is not None: | |
1600 | pullop.stepsdone.add('request-bookmarks') |
|
1613 | pullop.stepsdone.add('request-bookmarks') | |
1601 |
|
1614 | |||
1602 | if ('request-bookmarks' not in pullop.stepsdone |
|
1615 | if ('request-bookmarks' not in pullop.stepsdone | |
1603 | and pullop.remotebookmarks is None |
|
1616 | and pullop.remotebookmarks is None | |
1604 | and not legacybookmark and hasbinarybook): |
|
1617 | and not legacybookmark and hasbinarybook): | |
1605 | kwargs['bookmarks'] = True |
|
1618 | kwargs['bookmarks'] = True | |
1606 | bookmarksrequested = True |
|
1619 | bookmarksrequested = True | |
1607 |
|
1620 | |||
1608 | if 'listkeys' in pullop.remotebundle2caps: |
|
1621 | if 'listkeys' in pullop.remotebundle2caps: | |
1609 | if 'request-bookmarks' not in pullop.stepsdone: |
|
1622 | if 'request-bookmarks' not in pullop.stepsdone: | |
1610 | # make sure to always include bookmark data when migrating |
|
1623 | # make sure to always include bookmark data when migrating | |
1611 | # `hg incoming --bundle` to using this function. |
|
1624 | # `hg incoming --bundle` to using this function. | |
1612 | pullop.stepsdone.add('request-bookmarks') |
|
1625 | pullop.stepsdone.add('request-bookmarks') | |
1613 | kwargs.setdefault('listkeys', []).append('bookmarks') |
|
1626 | kwargs.setdefault('listkeys', []).append('bookmarks') | |
1614 |
|
1627 | |||
1615 | # If this is a full pull / clone and the server supports the clone bundles |
|
1628 | # If this is a full pull / clone and the server supports the clone bundles | |
1616 | # feature, tell the server whether we attempted a clone bundle. The |
|
1629 | # feature, tell the server whether we attempted a clone bundle. The | |
1617 | # presence of this flag indicates the client supports clone bundles. This |
|
1630 | # presence of this flag indicates the client supports clone bundles. This | |
1618 | # will enable the server to treat clients that support clone bundles |
|
1631 | # will enable the server to treat clients that support clone bundles | |
1619 | # differently from those that don't. |
|
1632 | # differently from those that don't. | |
1620 | if (pullop.remote.capable('clonebundles') |
|
1633 | if (pullop.remote.capable('clonebundles') | |
1621 | and pullop.heads is None and list(pullop.common) == [nullid]): |
|
1634 | and pullop.heads is None and list(pullop.common) == [nullid]): | |
1622 | kwargs['cbattempted'] = pullop.clonebundleattempted |
|
1635 | kwargs['cbattempted'] = pullop.clonebundleattempted | |
1623 |
|
1636 | |||
1624 | if streaming: |
|
1637 | if streaming: | |
1625 | pullop.repo.ui.status(_('streaming all changes\n')) |
|
1638 | pullop.repo.ui.status(_('streaming all changes\n')) | |
1626 | elif not pullop.fetch: |
|
1639 | elif not pullop.fetch: | |
1627 | pullop.repo.ui.status(_("no changes found\n")) |
|
1640 | pullop.repo.ui.status(_("no changes found\n")) | |
1628 | pullop.cgresult = 0 |
|
1641 | pullop.cgresult = 0 | |
1629 | else: |
|
1642 | else: | |
1630 | if pullop.heads is None and list(pullop.common) == [nullid]: |
|
1643 | if pullop.heads is None and list(pullop.common) == [nullid]: | |
1631 | pullop.repo.ui.status(_("requesting all changes\n")) |
|
1644 | pullop.repo.ui.status(_("requesting all changes\n")) | |
1632 | if obsolete.isenabled(pullop.repo, obsolete.exchangeopt): |
|
1645 | if obsolete.isenabled(pullop.repo, obsolete.exchangeopt): | |
1633 | remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps) |
|
1646 | remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps) | |
1634 | if obsolete.commonversion(remoteversions) is not None: |
|
1647 | if obsolete.commonversion(remoteversions) is not None: | |
1635 | kwargs['obsmarkers'] = True |
|
1648 | kwargs['obsmarkers'] = True | |
1636 | pullop.stepsdone.add('obsmarkers') |
|
1649 | pullop.stepsdone.add('obsmarkers') | |
1637 | _pullbundle2extraprepare(pullop, kwargs) |
|
1650 | _pullbundle2extraprepare(pullop, kwargs) | |
1638 | bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs)) |
|
1651 | bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs)) | |
1639 | try: |
|
1652 | try: | |
1640 | op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction, |
|
1653 | op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction, | |
1641 | source='pull') |
|
1654 | source='pull') | |
1642 | op.modes['bookmarks'] = 'records' |
|
1655 | op.modes['bookmarks'] = 'records' | |
1643 | bundle2.processbundle(pullop.repo, bundle, op=op) |
|
1656 | bundle2.processbundle(pullop.repo, bundle, op=op) | |
1644 | except bundle2.AbortFromPart as exc: |
|
1657 | except bundle2.AbortFromPart as exc: | |
1645 | pullop.repo.ui.status(_('remote: abort: %s\n') % exc) |
|
1658 | pullop.repo.ui.status(_('remote: abort: %s\n') % exc) | |
1646 | raise error.Abort(_('pull failed on remote'), hint=exc.hint) |
|
1659 | raise error.Abort(_('pull failed on remote'), hint=exc.hint) | |
1647 | except error.BundleValueError as exc: |
|
1660 | except error.BundleValueError as exc: | |
1648 | raise error.Abort(_('missing support for %s') % exc) |
|
1661 | raise error.Abort(_('missing support for %s') % exc) | |
1649 |
|
1662 | |||
1650 | if pullop.fetch: |
|
1663 | if pullop.fetch: | |
1651 | pullop.cgresult = bundle2.combinechangegroupresults(op) |
|
1664 | pullop.cgresult = bundle2.combinechangegroupresults(op) | |
1652 |
|
1665 | |||
1653 | # processing phases change |
|
1666 | # processing phases change | |
1654 | for namespace, value in op.records['listkeys']: |
|
1667 | for namespace, value in op.records['listkeys']: | |
1655 | if namespace == 'phases': |
|
1668 | if namespace == 'phases': | |
1656 | _pullapplyphases(pullop, value) |
|
1669 | _pullapplyphases(pullop, value) | |
1657 |
|
1670 | |||
1658 | # processing bookmark update |
|
1671 | # processing bookmark update | |
1659 | if bookmarksrequested: |
|
1672 | if bookmarksrequested: | |
1660 | books = {} |
|
1673 | books = {} | |
1661 | for record in op.records['bookmarks']: |
|
1674 | for record in op.records['bookmarks']: | |
1662 | books[record['bookmark']] = record["node"] |
|
1675 | books[record['bookmark']] = record["node"] | |
1663 | pullop.remotebookmarks = books |
|
1676 | pullop.remotebookmarks = books | |
1664 | else: |
|
1677 | else: | |
1665 | for namespace, value in op.records['listkeys']: |
|
1678 | for namespace, value in op.records['listkeys']: | |
1666 | if namespace == 'bookmarks': |
|
1679 | if namespace == 'bookmarks': | |
1667 | pullop.remotebookmarks = bookmod.unhexlifybookmarks(value) |
|
1680 | pullop.remotebookmarks = bookmod.unhexlifybookmarks(value) | |
1668 |
|
1681 | |||
1669 | # bookmark data were either already there or pulled in the bundle |
|
1682 | # bookmark data were either already there or pulled in the bundle | |
1670 | if pullop.remotebookmarks is not None: |
|
1683 | if pullop.remotebookmarks is not None: | |
1671 | _pullbookmarks(pullop) |
|
1684 | _pullbookmarks(pullop) | |
1672 |
|
1685 | |||
1673 | def _pullbundle2extraprepare(pullop, kwargs): |
|
1686 | def _pullbundle2extraprepare(pullop, kwargs): | |
1674 | """hook function so that extensions can extend the getbundle call""" |
|
1687 | """hook function so that extensions can extend the getbundle call""" | |
1675 |
|
1688 | |||
1676 | def _pullchangeset(pullop): |
|
1689 | def _pullchangeset(pullop): | |
1677 | """pull changeset from unbundle into the local repo""" |
|
1690 | """pull changeset from unbundle into the local repo""" | |
1678 | # We delay the open of the transaction as late as possible so we |
|
1691 | # We delay the open of the transaction as late as possible so we | |
1679 | # don't open transaction for nothing or you break future useful |
|
1692 | # don't open transaction for nothing or you break future useful | |
1680 | # rollback call |
|
1693 | # rollback call | |
1681 | if 'changegroup' in pullop.stepsdone: |
|
1694 | if 'changegroup' in pullop.stepsdone: | |
1682 | return |
|
1695 | return | |
1683 | pullop.stepsdone.add('changegroup') |
|
1696 | pullop.stepsdone.add('changegroup') | |
1684 | if not pullop.fetch: |
|
1697 | if not pullop.fetch: | |
1685 | pullop.repo.ui.status(_("no changes found\n")) |
|
1698 | pullop.repo.ui.status(_("no changes found\n")) | |
1686 | pullop.cgresult = 0 |
|
1699 | pullop.cgresult = 0 | |
1687 | return |
|
1700 | return | |
1688 | tr = pullop.gettransaction() |
|
1701 | tr = pullop.gettransaction() | |
1689 | if pullop.heads is None and list(pullop.common) == [nullid]: |
|
1702 | if pullop.heads is None and list(pullop.common) == [nullid]: | |
1690 | pullop.repo.ui.status(_("requesting all changes\n")) |
|
1703 | pullop.repo.ui.status(_("requesting all changes\n")) | |
1691 | elif pullop.heads is None and pullop.remote.capable('changegroupsubset'): |
|
1704 | elif pullop.heads is None and pullop.remote.capable('changegroupsubset'): | |
1692 | # issue1320, avoid a race if remote changed after discovery |
|
1705 | # issue1320, avoid a race if remote changed after discovery | |
1693 | pullop.heads = pullop.rheads |
|
1706 | pullop.heads = pullop.rheads | |
1694 |
|
1707 | |||
1695 | if pullop.remote.capable('getbundle'): |
|
1708 | if pullop.remote.capable('getbundle'): | |
1696 | # TODO: get bundlecaps from remote |
|
1709 | # TODO: get bundlecaps from remote | |
1697 | cg = pullop.remote.getbundle('pull', common=pullop.common, |
|
1710 | cg = pullop.remote.getbundle('pull', common=pullop.common, | |
1698 | heads=pullop.heads or pullop.rheads) |
|
1711 | heads=pullop.heads or pullop.rheads) | |
1699 | elif pullop.heads is None: |
|
1712 | elif pullop.heads is None: | |
1700 | with pullop.remote.commandexecutor() as e: |
|
1713 | with pullop.remote.commandexecutor() as e: | |
1701 | cg = e.callcommand('changegroup', { |
|
1714 | cg = e.callcommand('changegroup', { | |
1702 | 'nodes': pullop.fetch, |
|
1715 | 'nodes': pullop.fetch, | |
1703 | 'source': 'pull', |
|
1716 | 'source': 'pull', | |
1704 | }).result() |
|
1717 | }).result() | |
1705 |
|
1718 | |||
1706 | elif not pullop.remote.capable('changegroupsubset'): |
|
1719 | elif not pullop.remote.capable('changegroupsubset'): | |
1707 | raise error.Abort(_("partial pull cannot be done because " |
|
1720 | raise error.Abort(_("partial pull cannot be done because " | |
1708 | "other repository doesn't support " |
|
1721 | "other repository doesn't support " | |
1709 | "changegroupsubset.")) |
|
1722 | "changegroupsubset.")) | |
1710 | else: |
|
1723 | else: | |
1711 | with pullop.remote.commandexecutor() as e: |
|
1724 | with pullop.remote.commandexecutor() as e: | |
1712 | cg = e.callcommand('changegroupsubset', { |
|
1725 | cg = e.callcommand('changegroupsubset', { | |
1713 | 'bases': pullop.fetch, |
|
1726 | 'bases': pullop.fetch, | |
1714 | 'heads': pullop.heads, |
|
1727 | 'heads': pullop.heads, | |
1715 | 'source': 'pull', |
|
1728 | 'source': 'pull', | |
1716 | }).result() |
|
1729 | }).result() | |
1717 |
|
1730 | |||
1718 | bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull', |
|
1731 | bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull', | |
1719 | pullop.remote.url()) |
|
1732 | pullop.remote.url()) | |
1720 | pullop.cgresult = bundle2.combinechangegroupresults(bundleop) |
|
1733 | pullop.cgresult = bundle2.combinechangegroupresults(bundleop) | |
1721 |
|
1734 | |||
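The branch ladder above probes server capabilities from newest to oldest: `getbundle` on modern servers, a bare `changegroup` for full pulls from old servers, and `changegroupsubset` for partial pulls where available. The same ladder, isolated (assuming `remote` is a connected peer and `heads` the requested heads):

    if remote.capable('getbundle'):
        proto = 'getbundle'              # modern servers
    elif heads is None:
        proto = 'changegroup'            # full pull from an old server
    elif not remote.capable('changegroupsubset'):
        raise Exception('partial pull not supported by this server')
    else:
        proto = 'changegroupsubset'      # partial pull from an old server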
1722 | def _pullphase(pullop): |
|
1735 | def _pullphase(pullop): | |
1723 | # Get remote phases data from remote |
|
1736 | # Get remote phases data from remote | |
1724 | if 'phases' in pullop.stepsdone: |
|
1737 | if 'phases' in pullop.stepsdone: | |
1725 | return |
|
1738 | return | |
1726 | remotephases = pullop.remote.listkeys('phases') |
|
1739 | remotephases = pullop.remote.listkeys('phases') | |
1727 | _pullapplyphases(pullop, remotephases) |
|
1740 | _pullapplyphases(pullop, remotephases) | |
1728 |
|
1741 | |||
1729 | def _pullapplyphases(pullop, remotephases): |
|
1742 | def _pullapplyphases(pullop, remotephases): | |
1730 | """apply phase movement from observed remote state""" |
|
1743 | """apply phase movement from observed remote state""" | |
1731 | if 'phases' in pullop.stepsdone: |
|
1744 | if 'phases' in pullop.stepsdone: | |
1732 | return |
|
1745 | return | |
1733 | pullop.stepsdone.add('phases') |
|
1746 | pullop.stepsdone.add('phases') | |
1734 | publishing = bool(remotephases.get('publishing', False)) |
|
1747 | publishing = bool(remotephases.get('publishing', False)) | |
1735 | if remotephases and not publishing: |
|
1748 | if remotephases and not publishing: | |
1736 | # remote is new and non-publishing |
|
1749 | # remote is new and non-publishing | |
1737 | pheads, _dr = phases.analyzeremotephases(pullop.repo, |
|
1750 | pheads, _dr = phases.analyzeremotephases(pullop.repo, | |
1738 | pullop.pulledsubset, |
|
1751 | pullop.pulledsubset, | |
1739 | remotephases) |
|
1752 | remotephases) | |
1740 | dheads = pullop.pulledsubset |
|
1753 | dheads = pullop.pulledsubset | |
1741 | else: |
|
1754 | else: | |
1742 | # Remote is old or publishing all common changesets |
|
1755 | # Remote is old or publishing all common changesets | |
1743 | # should be seen as public |
|
1756 | # should be seen as public | |
1744 | pheads = pullop.pulledsubset |
|
1757 | pheads = pullop.pulledsubset | |
1745 | dheads = [] |
|
1758 | dheads = [] | |
1746 | unfi = pullop.repo.unfiltered() |
|
1759 | unfi = pullop.repo.unfiltered() | |
1747 | phase = unfi._phasecache.phase |
|
1760 | phase = unfi._phasecache.phase | |
1748 | rev = unfi.changelog.nodemap.get |
|
1761 | rev = unfi.changelog.nodemap.get | |
1749 | public = phases.public |
|
1762 | public = phases.public | |
1750 | draft = phases.draft |
|
1763 | draft = phases.draft | |
1751 |
|
1764 | |||
1752 | # exclude changesets already public locally and update the others |
|
1765 | # exclude changesets already public locally and update the others | |
1753 | pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public] |
|
1766 | pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public] | |
1754 | if pheads: |
|
1767 | if pheads: | |
1755 | tr = pullop.gettransaction() |
|
1768 | tr = pullop.gettransaction() | |
1756 | phases.advanceboundary(pullop.repo, tr, public, pheads) |
|
1769 | phases.advanceboundary(pullop.repo, tr, public, pheads) | |
1757 |
|
1770 | |||
1758 | # exclude changesets already draft locally and update the others |
|
1771 | # exclude changesets already draft locally and update the others | |
1759 | dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft] |
|
1772 | dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft] | |
1760 | if dheads: |
|
1773 | if dheads: | |
1761 | tr = pullop.gettransaction() |
|
1774 | tr = pullop.gettransaction() | |
1762 | phases.advanceboundary(pullop.repo, tr, draft, dheads) |
|
1775 | phases.advanceboundary(pullop.repo, tr, draft, dheads) | |
1763 |
|
1776 | |||
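The `remotephases` dict this function receives is a raw pushkey listing: hex draft roots mapped to phase-number strings, plus a `'publishing'` entry that publishing servers add (the subrepo workaround near the top of this diff fakes one as `{'publishing': 'True'}`). The flag is presence-based, since any non-empty string is truthy. A made-up non-publishing reply:

    remotephases = {
        'abcdef0123456789abcdef0123456789abcdef01': '1',  # a draft root (hex)
    }
    assert not bool(remotephases.get('publishing', False))

    # a publishing server advertises itself explicitly:
    assert bool({'publishing': 'True'}.get('publishing', False))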
1764 | def _pullbookmarks(pullop): |
|
1777 | def _pullbookmarks(pullop): | |
1765 | """process the remote bookmark information to update the local one""" |
|
1778 | """process the remote bookmark information to update the local one""" | |
1766 | if 'bookmarks' in pullop.stepsdone: |
|
1779 | if 'bookmarks' in pullop.stepsdone: | |
1767 | return |
|
1780 | return | |
1768 | pullop.stepsdone.add('bookmarks') |
|
1781 | pullop.stepsdone.add('bookmarks') | |
1769 | repo = pullop.repo |
|
1782 | repo = pullop.repo | |
1770 | remotebookmarks = pullop.remotebookmarks |
|
1783 | remotebookmarks = pullop.remotebookmarks | |
1771 | bookmod.updatefromremote(repo.ui, repo, remotebookmarks, |
|
1784 | bookmod.updatefromremote(repo.ui, repo, remotebookmarks, | |
1772 | pullop.remote.url(), |
|
1785 | pullop.remote.url(), | |
1773 | pullop.gettransaction, |
|
1786 | pullop.gettransaction, | |
1774 | explicit=pullop.explicitbookmarks) |
|
1787 | explicit=pullop.explicitbookmarks) | |
1775 |
|
1788 | |||
1776 | def _pullobsolete(pullop): |
|
1789 | def _pullobsolete(pullop): | |
1777 | """utility function to pull obsolete markers from a remote |
|
1790 | """utility function to pull obsolete markers from a remote | |
1778 |
|
1791 | |||
1779 | The `gettransaction` is a function that returns the pull transaction, creating |
|
1792 | The `gettransaction` is a function that returns the pull transaction, creating | |
1780 | one if necessary. We return the transaction to inform the calling code that |
|
1793 | one if necessary. We return the transaction to inform the calling code that | |
1781 | a new transaction has been created (when applicable). |
|
1794 | a new transaction has been created (when applicable). | |
1782 |
|
1795 | |||
1783 | Exists mostly to allow overriding for experimentation purposes""" |
|
1796 | Exists mostly to allow overriding for experimentation purposes""" | |
1784 | if 'obsmarkers' in pullop.stepsdone: |
|
1797 | if 'obsmarkers' in pullop.stepsdone: | |
1785 | return |
|
1798 | return | |
1786 | pullop.stepsdone.add('obsmarkers') |
|
1799 | pullop.stepsdone.add('obsmarkers') | |
1787 | tr = None |
|
1800 | tr = None | |
1788 | if obsolete.isenabled(pullop.repo, obsolete.exchangeopt): |
|
1801 | if obsolete.isenabled(pullop.repo, obsolete.exchangeopt): | |
1789 | pullop.repo.ui.debug('fetching remote obsolete markers\n') |
|
1802 | pullop.repo.ui.debug('fetching remote obsolete markers\n') | |
1790 | remoteobs = pullop.remote.listkeys('obsolete') |
|
1803 | remoteobs = pullop.remote.listkeys('obsolete') | |
1791 | if 'dump0' in remoteobs: |
|
1804 | if 'dump0' in remoteobs: | |
1792 | tr = pullop.gettransaction() |
|
1805 | tr = pullop.gettransaction() | |
1793 | markers = [] |
|
1806 | markers = [] | |
1794 | for key in sorted(remoteobs, reverse=True): |
|
1807 | for key in sorted(remoteobs, reverse=True): | |
1795 | if key.startswith('dump'): |
|
1808 | if key.startswith('dump'): | |
1796 | data = util.b85decode(remoteobs[key]) |
|
1809 | data = util.b85decode(remoteobs[key]) | |
1797 | version, newmarks = obsolete._readmarkers(data) |
|
1810 | version, newmarks = obsolete._readmarkers(data) | |
1798 | markers += newmarks |
|
1811 | markers += newmarks | |
1799 | if markers: |
|
1812 | if markers: | |
1800 | pullop.repo.obsstore.add(tr, markers) |
|
1813 | pullop.repo.obsstore.add(tr, markers) | |
1801 | pullop.repo.invalidatevolatilesets() |
|
1814 | pullop.repo.invalidatevolatilesets() | |
1802 | return tr |
|
1815 | return tr | |
1803 |
|
1816 | |||
1804 | def caps20to10(repo, role): |
|
1817 | def caps20to10(repo, role): | |
1805 | """return a set with appropriate options to use bundle20 during getbundle""" |
|
1818 | """return a set with appropriate options to use bundle20 during getbundle""" | |
1806 | caps = {'HG20'} |
|
1819 | caps = {'HG20'} | |
1807 | capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role)) |
|
1820 | capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role)) | |
1808 | caps.add('bundle2=' + urlreq.quote(capsblob)) |
|
1821 | caps.add('bundle2=' + urlreq.quote(capsblob)) | |
1809 | return caps |
|
1822 | return caps | |
1810 |
|
1823 | |||
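The set built here is what later arrives as `bundlecaps` in `getbundlechunks` and is what `bundle2requested` below keys on: the `HG20` magic plus one URL-quoted capability blob. A representative value (the blob contents are abbreviated and assumed):

    bundlecaps = {
        'HG20',
        'bundle2=HG20%0Achangegroup%3D01%2C02',  # urlreq.quote'd caps blob
    }
    assert any(cap.startswith('HG2') for cap in bundlecaps)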
1811 | # List of names of steps to perform for a bundle2 getbundle; order matters. |
|
1824 | # List of names of steps to perform for a bundle2 getbundle; order matters. | |
1812 | getbundle2partsorder = [] |
|
1825 | getbundle2partsorder = [] | |
1813 |
|
1826 | |||
1814 | # Mapping between step name and function |
|
1827 | # Mapping between step name and function | |
1815 | # |
|
1828 | # | |
1816 | # This exists to help extensions wrap steps if necessary |
|
1829 | # This exists to help extensions wrap steps if necessary | |
1817 | getbundle2partsmapping = {} |
|
1830 | getbundle2partsmapping = {} | |
1818 |
|
1831 | |||
1819 | def getbundle2partsgenerator(stepname, idx=None): |
|
1832 | def getbundle2partsgenerator(stepname, idx=None): | |
1820 | """decorator for function generating bundle2 part for getbundle |
|
1833 | """decorator for function generating bundle2 part for getbundle | |
1821 |
|
1834 | |||
1822 | The function is added to the step -> function mapping and appended to the |
|
1835 | The function is added to the step -> function mapping and appended to the | |
1823 | list of steps. Beware that decorated functions will be added in order |
|
1836 | list of steps. Beware that decorated functions will be added in order | |
1824 | (this may matter). |
|
1837 | (this may matter). | |
1825 |
|
1838 | |||
1826 | You can only use this decorator for new steps; if you want to wrap a step |
|
1839 | You can only use this decorator for new steps; if you want to wrap a step | |
1827 | from an extension, modify the getbundle2partsmapping dictionary directly.""" |
|
1840 | from an extension, modify the getbundle2partsmapping dictionary directly.""" | |
1828 | def dec(func): |
|
1841 | def dec(func): | |
1829 | assert stepname not in getbundle2partsmapping |
|
1842 | assert stepname not in getbundle2partsmapping | |
1830 | getbundle2partsmapping[stepname] = func |
|
1843 | getbundle2partsmapping[stepname] = func | |
1831 | if idx is None: |
|
1844 | if idx is None: | |
1832 | getbundle2partsorder.append(stepname) |
|
1845 | getbundle2partsorder.append(stepname) | |
1833 | else: |
|
1846 | else: | |
1834 | getbundle2partsorder.insert(idx, stepname) |
|
1847 | getbundle2partsorder.insert(idx, stepname) | |
1835 | return func |
|
1848 | return func | |
1836 | return dec |
|
1849 | return dec | |
1837 |
|
1850 | |||
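Registering a part generator mirrors the pull-discovery decorator, with an optional `idx` to splice a step into a specific position in `getbundle2partsorder`. A hypothetical extension part (part name, capability, and body are invented):

    @getbundle2partsgenerator('my-ext-part', idx=0)  # run before other parts
    def _getbundlemyextpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
        # only emit the part when the client advertised support for it
        if b2caps and 'my-ext-cap' in b2caps:
            bundler.newpart('my-ext-part', data='payload')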
1838 | def bundle2requested(bundlecaps): |
|
1851 | def bundle2requested(bundlecaps): | |
1839 | if bundlecaps is not None: |
|
1852 | if bundlecaps is not None: | |
1840 | return any(cap.startswith('HG2') for cap in bundlecaps) |
|
1853 | return any(cap.startswith('HG2') for cap in bundlecaps) | |
1841 | return False |
|
1854 | return False | |
1842 |
|
1855 | |||
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()

@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get(r'cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

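# A standalone sketch (not part of this file) of the version negotiation
# performed above: intersect the client's advertised changegroup versions
# with what the server can emit, keep '01' for the empty value that hg
# 3.1/3.2 sent, and otherwise pick the highest common version. Names and
# data are hypothetical.

def negotiate_cgversion(clientversions, serverversions, default='01'):
    if not clientversions:  # 3.1 and 3.2 ship with an empty value
        return default
    common = [v for v in clientversions if v in serverversions]
    if not common:
        raise ValueError('no common changegroup version')
    return max(common)

assert negotiate_cgversion(['01', '02', '03'], {'01', '02'}) == '02'
assert negotiate_cgversion([], {'01', '02'}) == '01'
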
@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now.
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data into the format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch:

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True)) or 'rev-branch-cache' not in b2caps:
        return
    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

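# A standalone sketch (not part of this file) of the race check above: the
# client may send the literal heads, the marker 'force', or a SHA-1 over the
# sorted concatenation of the binary node ids. The 20-byte nodes below are
# fabricated for illustration.

import hashlib

server_heads = [b'\x11' * 20, b'\x22' * 20]
heads_hash = hashlib.sha1(b''.join(sorted(server_heads))).digest()

their_heads = [b'hashed', heads_hash]  # as carried by an unbundle request
raced = not (their_heads == [b'force'] or their_heads == server_heads or
             their_heads == [b'hashed', heads_hash])
assert not raced  # heads unchanged since the client looked: push may proceed
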
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of the
    bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

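# A standalone sketch (not part of this file) of the lazy-locking idiom used
# above: `lockandtr` is a list rather than three locals because the nested
# `gettransaction` can mutate list cells of the enclosing scope, while
# rebinding plain locals would have required py3-only `nonlocal`. The
# resource class below is a stand-in for wlock/lock/transaction.

acquired = []

class FakeResource(object):
    def __init__(self, name):
        self.name = name
        acquired.append(name)

def makelazy():
    lockandtr = [None, None, None]
    def gettransaction():
        if not lockandtr[2]:  # acquire everything only on first use
            lockandtr[0] = FakeResource('wlock')
            lockandtr[1] = FakeResource('lock')
            lockandtr[2] = FakeResource('tr')
        return lockandtr[2]
    return gettransaction

gettransaction = makelazy()
assert acquired == []                       # nothing acquired yet
tr = gettransaction()
assert acquired == ['wlock', 'lock', 'tr']  # acquired once, in lock order
assert gettransaction() is tr               # later calls reuse the handle
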
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value,
                                                 externalnames=True)
                    attrs['COMPRESSION'] = bundlespec.compression
                    attrs['VERSION'] = bundlespec.version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

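# A standalone sketch (not part of this file) of the manifest syntax the
# parser above consumes: one URL per line followed by percent-encoded
# key=value attributes. This uses the py3 stdlib directly where the real
# code goes through Mercurial's urlreq wrapper; the manifest content is
# hypothetical.

from urllib.parse import unquote

manifest = (
    'https://example.com/full.zstd.hg BUNDLESPEC=zstd-v2 REQUIRESNI=true\n'
    'https://example.com/full.gz.hg BUNDLESPEC=gzip-v1 note=two%20words\n'
)

entries = []
for line in manifest.splitlines():
    fields = line.split()
    if not fields:
        continue
    attrs = {'URL': fields[0]}
    for rawattr in fields[1:]:
        key, value = rawattr.split('=', 1)
        attrs[unquote(key)] = unquote(value)
    entries.append(attrs)

assert entries[0]['BUNDLESPEC'] == 'zstd-v2'
assert entries[1]['note'] == 'two words'
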
def isstreamclonespec(bundlespec):
    # Stream clone v1
    if (bundlespec.compression == 'UN' and bundlespec.version == 's1'):
        return True

    # Stream clone v2
    if (bundlespec.compression == 'UN' and bundlespec.version == '02' and
            bundlespec.contentopts.get('streamv2')):
        return True

    return False

def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

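# A standalone sketch (not part of this file) of the preference ordering the
# rich-comparison class implements: the same pairwise comparison can be fed
# to sorted() via functools.cmp_to_key. This simplified version omits the
# missing-attribute special cases above; entries and prefers are
# hypothetical.

from functools import cmp_to_key

prefers = [('COMPRESSION', 'zstd'), ('VERSION', 'v2')]

def prefcmp(a, b):
    for key, wanted in prefers:
        avalue, bvalue = a.get(key), b.get(key)
        if avalue == bvalue:
            continue        # tie on this attribute: try the next preference
        if avalue == wanted:
            return -1       # exact match on the preferred value sorts first
        if bvalue == wanted:
            return 1
    return 0                # undecidable: sorted() keeps manifest order

entries = [{'URL': 'a', 'COMPRESSION': 'gzip'},
           {'URL': 'b', 'COMPRESSION': 'zstd'}]
ordered = sorted(entries, key=cmp_to_key(prefcmp))
assert [e['URL'] for e in ordered] == ['b', 'a']
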
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

    return False