##// END OF EJS Templates
errors: name arguments to Abort constructor...
Martin von Zweigbergk -
r46274:d2e1dcd4 default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,1849 +1,1849
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18
18
19 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
20
20
21 from mercurial import (
21 from mercurial import (
22 archival,
22 archival,
23 cmdutil,
23 cmdutil,
24 copies as copiesmod,
24 copies as copiesmod,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 exthelper,
28 exthelper,
29 filemerge,
29 filemerge,
30 hg,
30 hg,
31 logcmdutil,
31 logcmdutil,
32 match as matchmod,
32 match as matchmod,
33 merge,
33 merge,
34 mergestate as mergestatemod,
34 mergestate as mergestatemod,
35 pathutil,
35 pathutil,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 smartset,
38 smartset,
39 subrepo,
39 subrepo,
40 upgrade,
40 upgrade,
41 url as urlmod,
41 url as urlmod,
42 util,
42 util,
43 )
43 )
44
44
45 from . import (
45 from . import (
46 lfcommands,
46 lfcommands,
47 lfutil,
47 lfutil,
48 storefactory,
48 storefactory,
49 )
49 )
50
50
51 eh = exthelper.exthelper()
51 eh = exthelper.exthelper()
52
52
53 lfstatus = lfutil.lfstatus
53 lfstatus = lfutil.lfstatus
54
54
55 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
55 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
56
56
57 # -- Utility functions: commonly/repeatedly needed functionality ---------------
57 # -- Utility functions: commonly/repeatedly needed functionality ---------------
58
58
59
59
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    matcher = copy.copy(match)

    def islargefile(f):
        # A file counts as "large" iff its standin is tracked in the manifest.
        return lfutil.standin(f) in manifest

    matcher._files = [f for f in matcher._files if islargefile(f)]
    matcher._fileset = set(matcher._files)
    matcher.always = lambda: False
    basematchfn = matcher.matchfn
    matcher.matchfn = lambda f: islargefile(f) and basematchfn(f)
    return matcher
71
71
72
72
def composenormalfilematcher(match, manifest, exclude=None):
    '''create a matcher that matches only the normal (non-largefile) files in
    the original matcher, optionally also excluding the names in exclude'''
    excluded = set() if exclude is None else set(exclude)

    matcher = copy.copy(match)

    def isnormalfile(f):
        # Normal means: not a standin itself, no standin in the manifest,
        # and not explicitly excluded by the caller.
        if lfutil.isstandin(f):
            return False
        if lfutil.standin(f) in manifest:
            return False
        return f not in excluded

    matcher._files = [f for f in matcher._files if isnormalfile(f)]
    matcher._fileset = set(matcher._files)
    matcher.always = lambda: False
    basematchfn = matcher.matchfn
    matcher.matchfn = lambda f: isnormalfile(f) and basematchfn(f)
    return matcher
88
88
89
89
def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    """Add matched working-directory files as largefiles.

    Walks the working directory with ``matcher`` and selects files that
    should become largefiles because of --large, the configured minimum
    size (--lfsize / config default), or the ``largefiles.patterns``
    config. For each selected file an (initially empty) standin is
    written and the file is marked added in the largefiles dirstate.

    Returns a pair ``(added, bad)``: largefile names successfully added
    and names that could not be added.
    """
    large = opts.get('large')
    # Minimum size in megabytes above which files are auto-added as largefiles.
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    # Matcher built from the configured largefiles.patterns, if any.
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    # badmatch with a no-op callback: suppress "file not found" complaints
    # during the walk; the normal add path reports those.
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (
                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
            )
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # Standin content starts empty (hash=b''); the real hash is
                # filled in at commit time.
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                # b'r' == removed: re-adding a removed file means the file
                # is back to a (possibly modified) tracked state.
                if lfdirstate[f] == b'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # Only report as bad the standins the user actually named.
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

    added = [f for f in lfnames if f not in bad]
    return added, bad
163
163
164
164
def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    """Remove largefiles matched by ``matcher`` from the working copy.

    With --after only files already deleted on disk are removed; otherwise
    deleted and clean files are removed (and unlinked from disk). Warns
    about modified/added/still-existing files that are skipped.

    Returns a non-zero int if any matched file had to be skipped with a
    warning, otherwise 0 (caller OR-combines this with the normal-file
    removal result).
    """
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    # Keep only names whose standin is actually tracked.
    modified, added, deleted, clean = [
        [f for f in list if lfutil.standin(f) in manifest]
        for list in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        # Emit msg for every file; return 1 if anything was warned about.
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        # From here on 'remove' holds standin paths, not largefile names.
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(
                repo, lfdirstate, lfutil.splitstandin(f), False
            )

        lfdirstate.write()

    return result
237
237
238
238
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 # appear at their right place in the manifests.
240 # appear at their right place in the manifests.
@eh.wrapfunction(webcommands, b'decodepath')
def decodepath(orig, path):
    # Map a standin path back to its largefile name for hgweb display;
    # non-standin paths pass through unchanged.
    largefile = lfutil.splitstandin(path)
    if largefile:
        return largefile
    return path
244
244
245
245
246 # -- Wrappers: modify existing commands --------------------------------
246 # -- Wrappers: modify existing commands --------------------------------
247
247
248
248
@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    # Reject the contradictory --normal/--large combination up front,
    # then defer to the wrapped add command.
    wantnormal = opts.get('normal')
    wantlarge = opts.get('large')
    if wantnormal and wantlarge:
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
269
269
270
270
@eh.wrapfunction(cmdutil, b'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    """Wrap cmdutil.add(): add largefiles first, then the remaining normal
    files via the original, returning the combined list of bad names."""
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    # Exclude files just added as largefiles from the normal add.
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)

    # extend() takes any iterable directly; the original wrapped lbad in a
    # redundant pass-through generator (`f for f in lbad`).
    bad.extend(lbad)
    return bad
285
285
286
286
@eh.wrapfunction(cmdutil, b'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    # Remove the normal files with the wrapped function, then remove the
    # matched largefiles; a non-zero largefile result takes precedence.
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    normalresult = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    largeresult = removelargefiles(
        ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
    )
    return largeresult or normalresult
309
309
310
310
@eh.wrapfunction(subrepo.hgsubrepo, b'status')
def overridestatusfn(orig, repo, rev2, **opts):
    # Run the subrepo status with largefiles status reporting enabled.
    subrepoobj = repo._repo
    with lfstatus(subrepoobj):
        return orig(repo, rev2, **opts)
315
315
316
316
@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    # Run the wrapped status command with largefiles reporting enabled.
    statusctx = lfstatus(repo)
    with statusctx:
        return orig(ui, repo, *pats, **opts)
321
321
322
322
@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    # Check subrepo dirtiness with largefiles status enabled so modified
    # largefiles are taken into account.
    subrepoobj = repo._repo
    with lfstatus(subrepoobj):
        return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
327
327
328
328
@eh.wrapcommand(b'log')
def overridelog(orig, ui, repo, *pats, **opts):
    # Wrap 'hg log' so that patterns naming largefiles also match their
    # standins (.hglf/...), while file matchers for --patch keep the
    # unmodified semantics.
    def overridematchandpats(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # Rewrite a single pattern so it addresses the standin; filesets
            # (set:...) are left alone.
            if pat.startswith(b'set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + b':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        cwd = repo.getcwd()
        if cwd:
            hglf = lfutil.shortname
            # Relative path from cwd back up to the repo root.
            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(cwd):
                    f = f[len(back) :]
                else:
                    f = cwd + b'/' + f
                return back + lfutil.standin(f)

        else:

            def tostandin(f):
                # Already a standin? Keep it; otherwise prepend .hglf/.
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)

        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == b'.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn

        def lfmatchfn(f):
            # Accept f if either the stripped largefile name or the file
            # itself satisfies the original matcher.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r

        m.matchfn = lfmatchfn

        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    oldmatchandpats = scmutil.matchandpats

    def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
        # Build the diff-file matcher from the *unwrapped* matchandpats so
        # case (2) above keeps the standard semantics.
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    wrappedmatchandpats = extensions.wrappedfunction(
        scmutil, b'matchandpats', overridematchandpats
    )
    wrappedmakefilematcher = extensions.wrappedfunction(
        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
    )
    # Both wrappers are active only for the duration of this log invocation.
    with wrappedmatchandpats, wrappedmakefilematcher:
        return orig(ui, repo, *pats, **opts)
448
448
449
449
@eh.wrapcommand(
    b'verify',
    opts=[
        (
            b'',
            b'large',
            None,
            _(b'verify that all largefiles in current revision exists'),
        ),
        (
            b'',
            b'lfa',
            None,
            _(b'verify largefiles in all revisions, not just current'),
        ),
        (
            b'',
            b'lfc',
            None,
            _(b'verify local largefile contents, not just existence'),
        ),
    ],
)
def overrideverify(orig, ui, repo, *pats, **opts):
    """Wrap 'hg verify': after the normal verification, optionally verify
    largefile existence (--large), all revisions (--lfa) and contents
    (--lfc)."""
    large = opts.pop('large', False)
    # Named 'allrevs' rather than 'all' to avoid shadowing the builtin all().
    allrevs = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or allrevs or contents:
        result = result or lfcommands.verifylfiles(ui, repo, allrevs, contents)
    return result
482
482
483
483
@eh.wrapcommand(
    b'debugstate',
    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
)
def overridedebugstate(orig, ui, repo, *pats, **opts):
    # With --large, run debugstate against the largefiles dirstate by
    # handing the wrapped command a minimal repo stand-in.
    if opts.pop('large', False):

        class fakerepo(object):
            # Only attribute debugstate reads from its repo argument.
            dirstate = lfutil.openlfdirstate(ui, repo)

        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
498
498
499
499
500 # Before starting the manifest merge, merge.updates will call
500 # Before starting the manifest merge, merge.updates will call
501 # _checkunknownfile to check if there are any files in the merged-in
501 # _checkunknownfile to check if there are any files in the merged-in
502 # changeset that collide with unknown files in the working copy.
502 # changeset that collide with unknown files in the working copy.
503 #
503 #
504 # The largefiles are seen as unknown, so this prevents us from merging
504 # The largefiles are seen as unknown, so this prevents us from merging
505 # in a file 'foo' if we already have a largefile with the same name.
505 # in a file 'foo' if we already have a largefile with the same name.
506 #
506 #
507 # The overridden function filters the unknown files by removing any
507 # The overridden function filters the unknown files by removing any
508 # largefiles. This makes the merge proceed and we can then handle this
508 # largefiles. This makes the merge proceed and we can then handle this
509 # case further in the overridden calculateupdates function below.
509 # case further in the overridden calculateupdates function below.
@eh.wrapfunction(merge, b'_checkunknownfile')
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    # A working-copy largefile is not an "unknown file" collision: if the
    # normalized name has a standin in wctx, report no conflict.
    normalized = repo.dirstate.normalize(f)
    if lfutil.standin(normalized) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
515
515
516
516
517 # The manifest merge handles conflicts on the manifest level. We want
517 # The manifest merge handles conflicts on the manifest level. We want
518 # to handle changes in largefile-ness of files at this level too.
518 # to handle changes in largefile-ness of files at this level too.
519 #
519 #
520 # The strategy is to run the original calculateupdates and then process
520 # The strategy is to run the original calculateupdates and then process
521 # the action list it outputs. There are two cases we need to deal with:
521 # the action list it outputs. There are two cases we need to deal with:
522 #
522 #
523 # 1. Normal file in p1, largefile in p2. Here the largefile is
523 # 1. Normal file in p1, largefile in p2. Here the largefile is
524 # detected via its standin file, which will enter the working copy
524 # detected via its standin file, which will enter the working copy
525 # with a "get" action. It is not "merge" since the standin is all
525 # with a "get" action. It is not "merge" since the standin is all
526 # Mercurial is concerned with at this level -- the link to the
526 # Mercurial is concerned with at this level -- the link to the
527 # existing normal file is not relevant here.
527 # existing normal file is not relevant here.
528 #
528 #
529 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
529 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
530 # since the largefile will be present in the working copy and
530 # since the largefile will be present in the working copy and
531 # different from the normal file in p2. Mercurial therefore
531 # different from the normal file in p2. Mercurial therefore
532 # triggers a merge action.
532 # triggers a merge action.
533 #
533 #
534 # In both cases, we prompt the user and emit new actions to either
534 # In both cases, we prompt the user and emit new actions to either
535 # remove the standin (if the normal file was kept) or to remove the
535 # remove the standin (if the normal file was kept) or to remove the
536 # normal file and get the standin (if the largefile was kept). The
536 # normal file and get the standin (if the largefile was kept). The
537 # default prompt answer is to use the largefile version since it was
537 # default prompt answer is to use the largefile version since it was
538 # presumably changed on purpose.
538 # presumably changed on purpose.
539 #
539 #
540 # Finally, the merge.applyupdates function will then take care of
540 # Finally, the merge.applyupdates function will then take care of
541 # writing the files into the working copy and lfcommands.updatelfiles
541 # writing the files into the working copy and lfcommands.updatelfiles
542 # will update the largefiles.
542 # will update the largefiles.
@eh.wrapfunction(merge, b'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    """Wrap merge.calculateupdates() to resolve largefile/standin conflicts.

    After the original calculation, rewrite the merge actions for every
    file that is a largefile on one side and a normal file on the other
    (see the long comment above for the two cases), prompting the user
    for which version to keep.  The default answer keeps the largefile.
    Returns the (possibly rewritten) merge result.
    """
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        return mresult

    # Collect the largefiles that appear in the merge actions on either
    # side (directly or via their standin).
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
        if sm in (b'g', b'dc') and lm != b'r':
            if sm == b'dc':
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(lfile, b'r', None, b'replaced by standin')
                mresult.addfile(standin, b'g', sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, b'k', None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin, b'k', None, b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin, b'r', None, b'replaced by non-standin',
                    )
        elif lm in (b'g', b'dc') and sm != b'r':
            if lm == b'dc':
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile, b'k', None, b'replaced by standin',
                    )
                    mresult.addfile(standin, b'k', None, b'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, b'a', None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, b'g', largs, b'replaces standin')
                mresult.addfile(
                    standin, b'r', None, b'replaced by non-standin',
                )

    return mresult
635
635
636
636
@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    """Wrap mergestate.recordupdates() to handle largefile mark-removed.

    Largefiles kept only as standins (MERGE_ACTION_LARGEFILE_MARK_REMOVED)
    are dropped from the main dirstate and added to the largefiles
    dirstate before the original recording runs.
    """
    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            repo.dirstate.remove(lfile)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge, getfiledata)
650
650
651
651
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, b'_filemerge')
def overridefilemerge(
    origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    # Only standins with both sides present get the largefile-specific
    # handling; everything else goes through the normal filemerge path.
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(
            premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
        )

    # Compare largefile hashes (as recorded in the standins) for the
    # ancestor, local and other versions.
    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
    if (
        ohash != ahash
        and ohash != dhash
        and (
            # Take the other side automatically when only it changed;
            # otherwise ask the user (default: keep local).
            dhash == ahash
            or repo.ui.promptchoice(
                _(
                    b'largefile %s has a merge conflict\nancestor was %s\n'
                    b'you can keep (l)ocal %s or take (o)ther %s.\n'
                    b'what do you want to do?'
                    b'$$ &Local $$ &Other'
                )
                % (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0,
            )
            == 1
        )
    ):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False
686
686
687
687
@eh.wrapfunction(copiesmod, b'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Wrap copies.pathcopies() to translate standin paths.

    Both keys and values of the returned copy dict are mapped from
    standin paths back to the corresponding largefile paths; non-standin
    paths pass through unchanged.
    """
    copies = orig(ctx1, ctx2, match=match)
    updated = {}

    for k, v in pycompat.iteritems(copies):
        updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v

    return updated
697
697
698
698
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
@eh.wrapfunction(cmdutil, b'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Wrap cmdutil.copy() to copy/rename largefiles alongside normals.

    Runs the original copy once restricted to normal files, then again
    restricted to standins, finally copying/renaming the largefiles
    themselves and updating the largefiles dirstate.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        # Restrict the matcher to normal (non-largefile) files.
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            if e.message != _(b'no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()

        def overridematch(
            orig,
            ctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Restrict the matcher to standins of tracked largefiles.
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, b''))
                else:
                    newpats.append(pat)
            match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                return (
                    lfile is not None
                    and (f in manifest)
                    and origmatchfn(lfile)
                    or None
                )

            m.matchfn = matchfn
            return m

        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        copiedfiles = []

        def overridecopyfile(orig, src, dest, *args, **kwargs):
            # Refuse to clobber an existing largefile unless --force,
            # and remember every (src, dest) pair for the second pass.
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                destlfile = dest.replace(lfutil.shortname, b'')
                if not opts[b'force'] and os.path.exists(destlfile):
                    raise IOError(
                        b'', _(b'destination largefile already exists')
                    )
            copiedfiles.append((src, dest))
            orig(src, dest, *args, **kwargs)

        with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
            with extensions.wrappedfunction(scmutil, b'match', overridematch):
                result += orig(ui, repo, listpats, opts, rename)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                lfdirstate.add(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        if e.message != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_(b'no files to copy'))

    return result
865
865
866
866
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Redirect the matcher to hit standins instead of largefiles.
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate[f] == b'r':
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )
952
952
953
953
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrap the pull command to also cache largefiles for --lfrev /
    --all-largefiles revisions after the changesets arrive."""
    revsprepull = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append(b'pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result
996
996
997
997
@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        # Resolved revisions travel to the push operation via opargs
        # (picked up by exchangepushoperation below).
        opargs = kwargs.setdefault('opargs', {})
        opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)
1017
1017
1018
1018
@eh.wrapfunction(exchange, b'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Override pushoperation constructor and store lfrevs parameter"""
    # 'lfrevs' is injected by overridepush via opargs; pop it before the
    # original constructor sees the keyword arguments.
    lfrevs = kwargs.pop('lfrevs', None)
    pushop = orig(*args, **kwargs)
    pushop.lfrevs = lfrevs
    return pushop
1026
1026
1027
1027
@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is set by overridepull only for the duration of
    # the --lfrev caching loop; outside that context this revset is an
    # error.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])
1053
1053
1054
1054
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    """Wrap the clone command to reject --all-largefiles with a
    non-local destination (the largefiles cannot be downloaded there)."""
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % d
        )

    return orig(ui, source, dest, **opts)
1077
1077
1078
1078
@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone() to optionally cache all largefiles after cloning.

    Returns the (sourcerepo, destrepo) pair from the original clone, or
    None if --all-largefiles was requested and some largefiles could not
    be downloaded.
    """
    result = orig(ui, opts, *args, **kwargs)

    if result is None:
        return result

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer. Therefore the largefiles can't be downloaded and the
    # hgrc can't be updated.
    if not repo:
        return result

    # Caching is implicitly limited to 'rev' option, since the dest repo was
    # truncated at that point. The user may expect a download count with
    # this option, so attempt whether or not this is a largefile repo.
    if opts.get(b'all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo)
        if missing != 0:
            return None

    return result
1103
1103
1104
1104
@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    """Wrap 'hg rebase' so largefile standins stay in sync while rebasing.

    Falls through to the original command untouched when the repository
    does not have largefiles enabled.
    """
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    # Install a commit hook that updates standins automatically, and silence
    # largefile status output for the intermediate commits rebase creates.
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        # In-memory rebase bypasses the working directory, which largefiles
        # relies on, so force it off for the duration of the command.
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        # Pop in reverse order of the appends above to restore prior state.
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1121
1121
1122
1122
@eh.extsetup
def overriderebase(ui):
    """At extension setup, force the rebase extension out of in-memory mode.

    Largefiles needs an on-disk working directory, so _dorebase() is wrapped
    to always pass inmemory=False. A no-op if rebase is not loaded.
    """
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        return

    def _dorebase(orig, *args, **kwargs):
        kwargs['inmemory'] = False
        return orig(*args, **kwargs)

    extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
1136
1136
1137
1137
@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Wrap 'hg archive' so largefiles are materialized in the archive.

    Uses the unfiltered repo with largefile status enabled for the call.
    """
    unfiltered = repo.unfiltered()
    with lfstatus(unfiltered):
        return orig(ui, unfiltered, dest, **opts)
1142
1142
1143
1143
@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    """Wrap hgweb's archive command to enable largefile status for it."""
    with lfstatus(web.repo):
        return orig(web)
1148
1148
1149
1149
@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    """Wrap archival.archive() to emit largefile contents, not standins.

    Only active when lfstatus is set on the repo (or its unfiltered view);
    otherwise defers entirely to the original implementation.
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Closure over `archiver`, `prefix`, `match` and `decode`: skip
        # non-matching names, optionally decode, then add to the archive.
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # `f` is a standin: substitute the real largefile name and a
            # data getter that reads from the store/system cache.
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            # `write` calls this within the same iteration, so binding the
            # loop-local `path` here is safe despite late binding.
            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive. That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()
1245
1245
1246
1246
@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Wrap hgsubrepo.archive() to emit largefile contents for subrepos.

    Mirrors overridearchive() for a subrepository; defers to the original
    implementation unless largefiles is enabled and lfstatus is set.
    """
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # NOTE(review): this tests `match(f)` (the loop variable) rather than
        # `match(name)` — presumably equivalent because callers pass f as
        # name, but worth confirming against overridearchive's version.
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # `f` is a standin: substitute the real largefile name and a
            # getter that reads the cached largefile content.
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            # Called by `write` within the same iteration, so the loop-local
            # `path` binding is safe despite late binding.
            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function. That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)
1309
1309
1310
1310
1311 # If a largefile is modified, the change is not reflected in its
1311 # If a largefile is modified, the change is not reflected in its
1312 # standin until a commit. cmdutil.bailifchanged() raises an exception
1312 # standin until a commit. cmdutil.bailifchanged() raises an exception
1313 # if the repo has uncommitted changes. Wrap it to also check if
1313 # if the repo has uncommitted changes. Wrap it to also check if
1314 # largefiles were changed. This is used by bisect, backout and fetch.
1314 # largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Wrap cmdutil.bailifchanged() to also abort on dirty largefiles.

    A modified largefile is not reflected in its standin until commit, so
    the original check alone would miss it.
    """
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        status = repo.status()
    dirty = (
        status.modified or status.added or status.removed or status.deleted
    )
    if dirty:
        raise error.Abort(_(b'uncommitted changes'))
1322
1322
1323
1323
@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    """Wrap cmdutil.postcommitstatus() to include largefiles in the status."""
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)
1328
1328
1329
1329
@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Wrap cmdutil.forget() to also forget largefiles and their standins.

    Normal files are delegated to the original implementation; largefiles
    matched by the pattern are removed from the largefiles dirstate and
    their standins are unlinked and forgotten.

    Returns the combined (bad, forgot) lists, matching the wrapped API.
    """
    # Handle the plain (non-largefile) files through the original code path.
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # Only forget files that actually have a standin in the manifest.
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            # 'a' == added: never committed, so drop instead of marking removed.
            if lfdirstate[f] == b'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1383
1383
1384
1384
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        # Collect each (filename, hash) pair only once, and track the set
        # of distinct hashes for a single batched existence query below.
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        # One remote round-trip to learn which hashes 'other' already has.
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]:  # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)
1409
1409
1410
1410
def outgoinghook(ui, repo, other, opts, missing):
    """'hg outgoing' hook: list largefiles that would be uploaded.

    Only active when the --large option was given (popped from opts).
    In debug mode every hash per file is printed; otherwise only the
    file names are shown.
    """
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            # Debug: remember every hash per file so showhashes() can list them.
            toupload = {}

            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b'    %s\n' % lfhash)

        else:
            # Non-debug: only file names are needed; hashes just feed the count.
            toupload = set()

            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)

            def showhashes(fn):
                pass

        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_(b'largefiles: no files to upload\n'))
        else:
            ui.status(
                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
            )
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + b'\n')
                showhashes(file)
            ui.status(b'\n')
1449
1449
1450
1450
@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    # Nothing to do here other than add the extra help option- the hook above
    # processes it.
    return orig(*args, **kwargs)
1458
1458
1459
1459
def summaryremotehook(ui, repo, opts, changes):
    """'hg summary --remote' hook: report largefiles pending upload.

    When 'changes' is None this is the capability query phase: return a
    (needs-incoming, needs-outgoing) pair. Otherwise print the count of
    largefile entities/files that would be uploaded to the remote.
    """
    largeopt = opts.get(b'large', False)
    if changes is None:
        if largeopt:
            return (False, True)  # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(
                _(b'largefiles: %d entities for %d files to upload\n')
                % (len(lfhashes), len(toupload))
            )
1492
1492
1493
1493
@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    """Wrap 'hg summary' to include largefiles in the reported status."""
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)
1500
1500
1501
1501
@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
    """Wrap scmutil.addremove() to route largefiles through largefile code.

    Missing largefiles are removed and new ones added via the largefiles
    helpers; everything else is then handed back to the original
    addremove with a matcher that excludes the largefiles just handled.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(
        matchmod.always(),
        subrepos=[],
        ignored=False,
        clean=False,
        unknown=False,
    )

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            m,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, uipathfn, opts)
1551
1551
1552
1552
1553 # Calling purge with --all will cause the largefiles to be deleted.
1553 # Calling purge with --all will cause the largefiles to be deleted.
1554 # Override repo.status to prevent this from happening.
1554 # Override repo.status to prevent this from happening.
@eh.wrapcommand(b'purge', extension=b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Wrap 'hg purge' so tracked largefiles are not reported as unknown.

    Temporarily replaces repo.status with a version that filters largefiles
    (anything known to the largefiles dirstate) out of the unknown/ignored
    lists, so purge --all will not delete them.
    """
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # '?' means the largefiles dirstate does not know the file, i.e. it
        # really is unknown/ignored; anything else is a tracked largefile.
        unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
        ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
1591
1591
1592
1592
@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    """Run ``hg rollback`` and then restore standins/lfdirstate to match.

    After the wrapped rollback moves the dirstate parents, standin files
    in the working directory are rewritten (or removed) to agree with the
    new parent, and the largefiles dirstate is resynchronized.
    """
    with repo.wlock():
        before = repo.dirstate.parents()
        # standins tracked before the rollback (everything not marked
        # removed); any of these left over afterwards are orphans
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo[b'.']
        # bring every standin in the post-rollback dirstate back in sync
        # with the new working directory parent
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == b'r':
                    # removed in the restored state: drop the file
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore standin content from the new parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, b'', False)
        # standins that existed before but are gone from the dirstate now
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # resync the largefiles dirstate and drop entries for largefiles
        # that no longer exist after the rollback
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result
1633
1633
1634
1634
@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run ``hg transplant`` with largefiles commit support installed.

    A commit hook suited to automated (non-interactive) commits is pushed
    for the duration of the command and largefiles status output is
    silenced; both are popped again even if the wrapped command raises.
    """
    is_resume = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(is_resume))

    def _silentstatus(*msg, **opts):
        # swallow largefiles status messages while transplanting
        return None

    repo._lfstatuswriters.append(_silentstatus)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1646
1646
1647
1647
@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """Version of ``hg cat`` that can emit largefile contents.

    The matcher is patched so that naming a largefile also matches its
    standin.  For each matched standin, the largefile data is written out
    (fetching it into the user cache from a store if necessary); plain
    files go through the normal ``cat`` path.

    Returns 0 if at least one file was written, 1 otherwise.
    Raises error.Abort if a largefile is neither cached nor downloadable.
    """
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        # also accept a largefile name by matching it via its standin
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        # suppress "no such file" complaints for names that only matched
        # through their standin
        if f not in notbad:  # idiomatic 'not in' (was 'not f in')
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(directory):
        # always descend into the standin directory, and visit a
        # directory whenever its standin counterpart would be visited
        if directory == lfutil.shortname:
            return True
        ret = origvisitdirfn(directory)
        if ret:
            return ret
        lf = lfutil.splitstandin(directory)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                # renamed from 'hash' to avoid shadowing the builtin
                lfhash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, lfhash):
                    # not cached locally: try to fetch it from a store
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, lfhash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, lfhash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1718
1718
1719
1719
@eh.wrapfunction(merge, b'_update')
def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
    """Wrap ``merge._update`` to keep largefiles and standins in sync.

    Before the real update, standins are refreshed from the working
    directory largefiles and all clean largefiles are conservatively
    marked dirty.  After the update, the changed standins determine which
    largefiles must be refreshed, and the unchanged ones are marked clean
    again.
    """
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(
            matchmod.always(),
            subrepos=[],
            ignored=False,
            clean=True,
            unknown=False,
        )
        oldclean = set(s.clean)
        pctx = repo[b'.']
        dctx = repo[node]
        # refresh standins for largefiles that may have changed in the
        # working directory; a standin that still matches the parent's
        # means the largefile is actually clean
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(
                repo, standin, lfhash, lfutil.getexecutable(lfileabs)
            )
            if standin in pctx and lfhash == lfutil.readasstandin(
                pctx[standin]
            ):
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.normallookup(lfile)
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)
        wc = kwargs.get('wc')
        if wc and wc.isinmemory():
            # largefiles is not a good candidate for in-memory merge (large
            # files, custom dirstate, matcher usage).
            raise error.ProgrammingError(
                b'largefiles is not compatible with in-memory merge'
            )
        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.normal(lfile)
        lfdirstate.write()

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(
            repo.ui, repo, filelist=filelist, normallookup=partial
        )

        return result
1805
1805
1806
1806
@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """After marking files as touched, refresh any largefiles involved.

    Standins among ``files`` are mapped back to their largefile names and
    the corresponding largefiles are updated in the working directory.
    """
    result = orig(repo, files, *args, **kwargs)

    # collect largefile names behind any standins that were touched
    touched = [
        lf
        for lf in (lfutil.splitstandin(f) for f in files)
        if lf is not None
    ]
    if touched:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=touched,
            printmessage=False,
            normallookup=True,
        )

    return result
1826
1826
1827
1827
@eh.wrapfunction(upgrade, b'preservedrequirements')
@eh.wrapfunction(upgrade, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Carry the ``largefiles`` requirement through a repo upgrade.

    Whenever the source repository has the requirement, add it to both
    the preserved and the supported-destination requirement sets.
    """
    requirements = orig(repo)
    if b'largefiles' in repo.requirements:
        requirements.add(b'largefiles')
    return requirements
1835
1835
1836
1836
# URL scheme prefix used to address a largefile directly by its id,
# e.g. b'largefile://<id>'; handled by the wrapped url opener below.
_lfscheme = b'largefile://'
1838
1838
1839
1839
@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None):
    """Open ``largefile://<id>`` urls from the largefile store.

    Any other url is handed to the wrapped opener unchanged.  Passing
    ``data`` together with a ``largefile://`` url is a programming error.
    """
    if not url_.startswith(_lfscheme):
        return orig(ui, url_, data=data)
    if data:
        msg = b"cannot use data on a 'largefile://' url"
        raise error.ProgrammingError(msg)
    lfid = url_[len(_lfscheme) :]
    return storefactory.getlfile(ui, lfid)
@@ -1,150 +1,149
1 # narrowwirepeer.py - passes narrow spec with unbundle command
1 # narrowwirepeer.py - passes narrow spec with unbundle command
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from mercurial import (
10 from mercurial import (
11 bundle2,
11 bundle2,
12 error,
12 error,
13 extensions,
13 extensions,
14 hg,
14 hg,
15 narrowspec,
15 narrowspec,
16 pycompat,
17 wireprototypes,
16 wireprototypes,
18 wireprotov1peer,
17 wireprotov1peer,
19 wireprotov1server,
18 wireprotov1server,
20 )
19 )
21
20
22 from . import narrowbundle2
21 from . import narrowbundle2
23
22
24
23
def uisetup():
    """Install the ``narrow_widen`` method on the wire protocol peer class."""
    wireprotov1peer.wirepeer.narrow_widen = peernarrowwiden
27
26
28
27
def reposetup(repo):
    """Register a peer-setup hook that forwards narrow patterns.

    Every wire peer created for ``repo`` gets its ``_calltwowaystream``
    wrapped so that ``unbundle`` calls carry the repository's narrow
    include/exclude patterns as extra wireproto arguments.
    """

    def wirereposetup(ui, peer):
        def addnarrowargs(orig, cmd, *args, **kwargs):
            # TODO: don't blindly add include/exclude wireproto
            # arguments to unbundle.
            if cmd == b'unbundle':
                include, exclude = repo.narrowpats
                kwargs["includepats"] = b','.join(include)
                kwargs["excludepats"] = b','.join(exclude)
            return orig(cmd, *args, **kwargs)

        extensions.wrapfunction(peer, b'_calltwowaystream', addnarrowargs)

    hg.wirepeersetupfuncs.append(wirereposetup)
43
42
44
43
@wireprotov1server.wireprotocommand(
    b'narrow_widen',
    b'oldincludes oldexcludes'
    b' newincludes newexcludes'
    b' commonheads cgversion'
    b' known ellipses',
    permission=b'pull',
)
def narrow_widen(
    repo,
    proto,
    oldincludes,
    oldexcludes,
    newincludes,
    newexcludes,
    commonheads,
    cgversion,
    known,
    ellipses,
):
    """wireprotocol command to send data when a narrow clone is widened. We
    will be sending a changegroup here.

    The current set of arguments which are required:
    oldincludes: the old includes of the narrow copy
    oldexcludes: the old excludes of the narrow copy
    newincludes: the new includes of the narrow copy
    newexcludes: the new excludes of the narrow copy
    commonheads: list of heads which are common between the server and client
    cgversion(maybe): the changegroup version to produce
    known: list of nodes which are known on the client (used in ellipses cases)
    ellipses: whether to send ellipses data or not

    On error, the abort message (and hint, if any) is sent back to the
    client inside an ``error:abort`` bundle2 part rather than raised.
    """

    preferuncompressed = False
    try:

        def splitpaths(data):
            # work around ''.split(',') => ['']
            return data.split(b',') if data else []

        oldincludes = splitpaths(oldincludes)
        newincludes = splitpaths(newincludes)
        oldexcludes = splitpaths(oldexcludes)
        newexcludes = splitpaths(newexcludes)
        # validate the patterns
        narrowspec.validatepatterns(set(oldincludes))
        narrowspec.validatepatterns(set(newincludes))
        narrowspec.validatepatterns(set(oldexcludes))
        narrowspec.validatepatterns(set(newexcludes))

        common = wireprototypes.decodelist(commonheads)
        known = wireprototypes.decodelist(known)
        # the flag comes over the wire as bytes; b'0' means disabled
        if ellipses == b'0':
            ellipses = False
        else:
            ellipses = bool(ellipses)
        # cgversion is forwarded to the bundlers untouched
        cgversion = cgversion

        bundler = bundle2.bundle20(repo.ui)
        newmatch = narrowspec.match(
            repo.root, include=newincludes, exclude=newexcludes
        )
        oldmatch = narrowspec.match(
            repo.root, include=oldincludes, exclude=oldexcludes
        )
        if not ellipses:
            bundle2.widen_bundle(
                bundler,
                repo,
                oldmatch,
                newmatch,
                common,
                known,
                cgversion,
                ellipses,
            )
        else:
            narrowbundle2.generate_ellipses_bundle2_for_widening(
                bundler, repo, oldmatch, newmatch, cgversion, common, known,
            )
    except error.Abort as exc:
        # report the failure to the client as a bundle2 error part
        bundler = bundle2.bundle20(repo.ui)
        manargs = [(b'message', exc.message)]
        advargs = []
        if exc.hint is not None:
            advargs.append((b'hint', exc.hint))
        bundler.addpart(bundle2.bundlepart(b'error:abort', manargs, advargs))
        preferuncompressed = True

    chunks = bundler.getchunks()
    return wireprototypes.streamres(
        gen=chunks, prefer_uncompressed=preferuncompressed
    )
139
138
140
139
def peernarrowwiden(remote, **kwargs):
    """Client side of ``narrow_widen``: encode arguments and issue the call.

    Node lists are encoded with ``wireprototypes.encodelist``, pattern
    lists become comma separated byte strings, and the ellipses flag is
    sent as ``b'0'``/``b'1'``.  The reply stream is returned wrapped in a
    bundle2 unbundler.
    """
    for key in ('commonheads', 'known'):
        kwargs[key] = wireprototypes.encodelist(kwargs[key])

    for key in ('oldincludes', 'newincludes', 'oldexcludes', 'newexcludes'):
        kwargs[key] = b','.join(kwargs[key])

    kwargs['ellipses'] = b'%i' % bool(kwargs['ellipses'])
    stream = remote._callcompressable(b'narrow_widen', **kwargs)
    return bundle2.getunbundler(remote.ui, stream)
@@ -1,2582 +1,2582
1 # bundle2.py - generic container format to transmit arbitrary data.
1 # bundle2.py - generic container format to transmit arbitrary data.
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """Handling of the new bundle2 format
7 """Handling of the new bundle2 format
8
8
9 The goal of bundle2 is to act as an atomically packet to transmit a set of
9 The goal of bundle2 is to act as an atomically packet to transmit a set of
10 payloads in an application agnostic way. It consist in a sequence of "parts"
10 payloads in an application agnostic way. It consist in a sequence of "parts"
11 that will be handed to and processed by the application layer.
11 that will be handed to and processed by the application layer.
12
12
13
13
14 General format architecture
14 General format architecture
15 ===========================
15 ===========================
16
16
The format is structured as follows
18
18
19 - magic string
19 - magic string
20 - stream level parameters
20 - stream level parameters
21 - payload parts (any number)
21 - payload parts (any number)
22 - end of stream marker.
22 - end of stream marker.
23
23
24 the Binary format
24 the Binary format
25 ============================
25 ============================
26
26
27 All numbers are unsigned and big-endian.
27 All numbers are unsigned and big-endian.
28
28
29 stream level parameters
29 stream level parameters
30 ------------------------
30 ------------------------
31
31
32 Binary format is as follow
32 Binary format is as follow
33
33
34 :params size: int32
34 :params size: int32
35
35
36 The total number of Bytes used by the parameters
36 The total number of Bytes used by the parameters
37
37
38 :params value: arbitrary number of Bytes
38 :params value: arbitrary number of Bytes
39
39
40 A blob of `params size` containing the serialized version of all stream level
40 A blob of `params size` containing the serialized version of all stream level
41 parameters.
41 parameters.
42
42
43 The blob contains a space separated list of parameters. Parameters with value
43 The blob contains a space separated list of parameters. Parameters with value
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
44 are stored in the form `<name>=<value>`. Both name and value are urlquoted.
45
45
46 Empty name are obviously forbidden.
46 Empty name are obviously forbidden.
47
47
48 Name MUST start with a letter. If this first letter is lower case, the
48 Name MUST start with a letter. If this first letter is lower case, the
49 parameter is advisory and can be safely ignored. However when the first
49 parameter is advisory and can be safely ignored. However when the first
50 letter is capital, the parameter is mandatory and the bundling process MUST
50 letter is capital, the parameter is mandatory and the bundling process MUST
stop if it is not able to process it.
52
52
53 Stream parameters use a simple textual format for two main reasons:
53 Stream parameters use a simple textual format for two main reasons:
54
54
55 - Stream level parameters should remain simple and we want to discourage any
55 - Stream level parameters should remain simple and we want to discourage any
56 crazy usage.
56 crazy usage.
57 - Textual data allow easy human inspection of a bundle2 header in case of
57 - Textual data allow easy human inspection of a bundle2 header in case of
58 troubles.
58 troubles.
59
59
60 Any Applicative level options MUST go into a bundle2 part instead.
60 Any Applicative level options MUST go into a bundle2 part instead.
61
61
62 Payload part
62 Payload part
63 ------------------------
63 ------------------------
64
64
65 Binary format is as follow
65 Binary format is as follow
66
66
67 :header size: int32
67 :header size: int32
68
68
69 The total number of Bytes used by the part header. When the header is empty
69 The total number of Bytes used by the part header. When the header is empty
70 (size = 0) this is interpreted as the end of stream marker.
70 (size = 0) this is interpreted as the end of stream marker.
71
71
72 :header:
72 :header:
73
73
74 The header defines how to interpret the part. It contains two piece of
74 The header defines how to interpret the part. It contains two piece of
75 data: the part type, and the part parameters.
75 data: the part type, and the part parameters.
76
76
77 The part type is used to route an application level handler, that can
77 The part type is used to route an application level handler, that can
78 interpret payload.
78 interpret payload.
79
79
80 Part parameters are passed to the application level handler. They are
80 Part parameters are passed to the application level handler. They are
81 meant to convey information that will help the application level object to
81 meant to convey information that will help the application level object to
82 interpret the part payload.
82 interpret the part payload.
83
83
The binary format of the header is as follows
85
85
86 :typesize: (one byte)
86 :typesize: (one byte)
87
87
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
88 :parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
89
89
90 :partid: A 32bits integer (unique in the bundle) that can be used to refer
90 :partid: A 32bits integer (unique in the bundle) that can be used to refer
91 to this part.
91 to this part.
92
92
93 :parameters:
93 :parameters:
94
94
95 Part's parameter may have arbitrary content, the binary structure is::
95 Part's parameter may have arbitrary content, the binary structure is::
96
96
97 <mandatory-count><advisory-count><param-sizes><param-data>
97 <mandatory-count><advisory-count><param-sizes><param-data>
98
98
99 :mandatory-count: 1 byte, number of mandatory parameters
99 :mandatory-count: 1 byte, number of mandatory parameters
100
100
101 :advisory-count: 1 byte, number of advisory parameters
101 :advisory-count: 1 byte, number of advisory parameters
102
102
103 :param-sizes:
103 :param-sizes:
104
104
105 N couple of bytes, where N is the total number of parameters. Each
105 N couple of bytes, where N is the total number of parameters. Each
106 couple contains (<size-of-key>, <size-of-value) for one parameter.
106 couple contains (<size-of-key>, <size-of-value) for one parameter.
107
107
108 :param-data:
108 :param-data:
109
109
110 A blob of bytes from which each parameter key and value can be
110 A blob of bytes from which each parameter key and value can be
111 retrieved using the list of size couples stored in the previous
111 retrieved using the list of size couples stored in the previous
112 field.
112 field.
113
113
114 Mandatory parameters comes first, then the advisory ones.
114 Mandatory parameters comes first, then the advisory ones.
115
115
116 Each parameter's key MUST be unique within the part.
116 Each parameter's key MUST be unique within the part.
117
117
118 :payload:
118 :payload:
119
119
120 payload is a series of `<chunksize><chunkdata>`.
120 payload is a series of `<chunksize><chunkdata>`.
121
121
122 `chunksize` is an int32, `chunkdata` are plain bytes (as much as
122 `chunksize` is an int32, `chunkdata` are plain bytes (as much as
123 `chunksize` says)` The payload part is concluded by a zero size chunk.
123 `chunksize` says)` The payload part is concluded by a zero size chunk.
124
124
125 The current implementation always produces either zero or one chunk.
125 The current implementation always produces either zero or one chunk.
126 This is an implementation limitation that will ultimately be lifted.
126 This is an implementation limitation that will ultimately be lifted.
127
127
128 `chunksize` can be negative to trigger special case processing. No such
128 `chunksize` can be negative to trigger special case processing. No such
129 processing is in place yet.
129 processing is in place yet.
130
130
131 Bundle processing
131 Bundle processing
132 ============================
132 ============================
133
133
134 Each part is processed in order using a "part handler". Handlers are registered
134 Each part is processed in order using a "part handler". Handlers are registered
135 for a certain part type.
135 for a certain part type.
136
136
137 The matching of a part to its handler is case insensitive. The case of the
137 The matching of a part to its handler is case insensitive. The case of the
138 part type is used to know if a part is mandatory or advisory. If the Part type
138 part type is used to know if a part is mandatory or advisory. If the Part type
139 contains any uppercase char it is considered mandatory. When no handler is
139 contains any uppercase char it is considered mandatory. When no handler is
140 known for a Mandatory part, the process is aborted and an exception is raised.
140 known for a Mandatory part, the process is aborted and an exception is raised.
141 If the part is advisory and no handler is known, the part is ignored. When the
141 If the part is advisory and no handler is known, the part is ignored. When the
142 process is aborted, the full bundle is still read from the stream to keep the
142 process is aborted, the full bundle is still read from the stream to keep the
143 channel usable. But none of the part read from an abort are processed. In the
143 channel usable. But none of the part read from an abort are processed. In the
144 future, dropping the stream may become an option for channel we do not care to
144 future, dropping the stream may become an option for channel we do not care to
145 preserve.
145 preserve.
146 """
146 """
147
147
148 from __future__ import absolute_import, division
148 from __future__ import absolute_import, division
149
149
150 import collections
150 import collections
151 import errno
151 import errno
152 import os
152 import os
153 import re
153 import re
154 import string
154 import string
155 import struct
155 import struct
156 import sys
156 import sys
157
157
158 from .i18n import _
158 from .i18n import _
159 from . import (
159 from . import (
160 bookmarks,
160 bookmarks,
161 changegroup,
161 changegroup,
162 encoding,
162 encoding,
163 error,
163 error,
164 node as nodemod,
164 node as nodemod,
165 obsolete,
165 obsolete,
166 phases,
166 phases,
167 pushkey,
167 pushkey,
168 pycompat,
168 pycompat,
169 requirements,
169 requirements,
170 scmutil,
170 scmutil,
171 streamclone,
171 streamclone,
172 tags,
172 tags,
173 url,
173 url,
174 util,
174 util,
175 )
175 )
176 from .utils import stringutil
176 from .utils import stringutil
177
177
178 urlerr = util.urlerr
178 urlerr = util.urlerr
179 urlreq = util.urlreq
179 urlreq = util.urlreq
180
180
181 _pack = struct.pack
181 _pack = struct.pack
182 _unpack = struct.unpack
182 _unpack = struct.unpack
183
183
184 _fstreamparamsize = b'>i'
184 _fstreamparamsize = b'>i'
185 _fpartheadersize = b'>i'
185 _fpartheadersize = b'>i'
186 _fparttypesize = b'>B'
186 _fparttypesize = b'>B'
187 _fpartid = b'>I'
187 _fpartid = b'>I'
188 _fpayloadsize = b'>i'
188 _fpayloadsize = b'>i'
189 _fpartparamcount = b'>BB'
189 _fpartparamcount = b'>BB'
190
190
191 preferedchunksize = 32768
191 preferedchunksize = 32768
192
192
193 _parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')
193 _parttypeforbidden = re.compile(b'[^a-zA-Z0-9_:-]')
194
194
195
195
196 def outdebug(ui, message):
196 def outdebug(ui, message):
197 """debug regarding output stream (bundling)"""
197 """debug regarding output stream (bundling)"""
198 if ui.configbool(b'devel', b'bundle2.debug'):
198 if ui.configbool(b'devel', b'bundle2.debug'):
199 ui.debug(b'bundle2-output: %s\n' % message)
199 ui.debug(b'bundle2-output: %s\n' % message)
200
200
201
201
202 def indebug(ui, message):
202 def indebug(ui, message):
203 """debug on input stream (unbundling)"""
203 """debug on input stream (unbundling)"""
204 if ui.configbool(b'devel', b'bundle2.debug'):
204 if ui.configbool(b'devel', b'bundle2.debug'):
205 ui.debug(b'bundle2-input: %s\n' % message)
205 ui.debug(b'bundle2-input: %s\n' % message)
206
206
207
207
208 def validateparttype(parttype):
208 def validateparttype(parttype):
209 """raise ValueError if a parttype contains invalid character"""
209 """raise ValueError if a parttype contains invalid character"""
210 if _parttypeforbidden.search(parttype):
210 if _parttypeforbidden.search(parttype):
211 raise ValueError(parttype)
211 raise ValueError(parttype)
212
212
213
213
214 def _makefpartparamsizes(nbparams):
214 def _makefpartparamsizes(nbparams):
215 """return a struct format to read part parameter sizes
215 """return a struct format to read part parameter sizes
216
216
217 The number parameters is variable so we need to build that format
217 The number parameters is variable so we need to build that format
218 dynamically.
218 dynamically.
219 """
219 """
220 return b'>' + (b'BB' * nbparams)
220 return b'>' + (b'BB' * nbparams)
221
221
222
222
223 parthandlermapping = {}
223 parthandlermapping = {}
224
224
225
225
226 def parthandler(parttype, params=()):
226 def parthandler(parttype, params=()):
227 """decorator that register a function as a bundle2 part handler
227 """decorator that register a function as a bundle2 part handler
228
228
229 eg::
229 eg::
230
230
231 @parthandler('myparttype', ('mandatory', 'param', 'handled'))
231 @parthandler('myparttype', ('mandatory', 'param', 'handled'))
232 def myparttypehandler(...):
232 def myparttypehandler(...):
233 '''process a part of type "my part".'''
233 '''process a part of type "my part".'''
234 ...
234 ...
235 """
235 """
236 validateparttype(parttype)
236 validateparttype(parttype)
237
237
238 def _decorator(func):
238 def _decorator(func):
239 lparttype = parttype.lower() # enforce lower case matching.
239 lparttype = parttype.lower() # enforce lower case matching.
240 assert lparttype not in parthandlermapping
240 assert lparttype not in parthandlermapping
241 parthandlermapping[lparttype] = func
241 parthandlermapping[lparttype] = func
242 func.params = frozenset(params)
242 func.params = frozenset(params)
243 return func
243 return func
244
244
245 return _decorator
245 return _decorator
246
246
247
247
248 class unbundlerecords(object):
248 class unbundlerecords(object):
249 """keep record of what happens during and unbundle
249 """keep record of what happens during and unbundle
250
250
251 New records are added using `records.add('cat', obj)`. Where 'cat' is a
251 New records are added using `records.add('cat', obj)`. Where 'cat' is a
252 category of record and obj is an arbitrary object.
252 category of record and obj is an arbitrary object.
253
253
254 `records['cat']` will return all entries of this category 'cat'.
254 `records['cat']` will return all entries of this category 'cat'.
255
255
256 Iterating on the object itself will yield `('category', obj)` tuples
256 Iterating on the object itself will yield `('category', obj)` tuples
257 for all entries.
257 for all entries.
258
258
259 All iterations happens in chronological order.
259 All iterations happens in chronological order.
260 """
260 """
261
261
262 def __init__(self):
262 def __init__(self):
263 self._categories = {}
263 self._categories = {}
264 self._sequences = []
264 self._sequences = []
265 self._replies = {}
265 self._replies = {}
266
266
267 def add(self, category, entry, inreplyto=None):
267 def add(self, category, entry, inreplyto=None):
268 """add a new record of a given category.
268 """add a new record of a given category.
269
269
270 The entry can then be retrieved in the list returned by
270 The entry can then be retrieved in the list returned by
271 self['category']."""
271 self['category']."""
272 self._categories.setdefault(category, []).append(entry)
272 self._categories.setdefault(category, []).append(entry)
273 self._sequences.append((category, entry))
273 self._sequences.append((category, entry))
274 if inreplyto is not None:
274 if inreplyto is not None:
275 self.getreplies(inreplyto).add(category, entry)
275 self.getreplies(inreplyto).add(category, entry)
276
276
277 def getreplies(self, partid):
277 def getreplies(self, partid):
278 """get the records that are replies to a specific part"""
278 """get the records that are replies to a specific part"""
279 return self._replies.setdefault(partid, unbundlerecords())
279 return self._replies.setdefault(partid, unbundlerecords())
280
280
281 def __getitem__(self, cat):
281 def __getitem__(self, cat):
282 return tuple(self._categories.get(cat, ()))
282 return tuple(self._categories.get(cat, ()))
283
283
284 def __iter__(self):
284 def __iter__(self):
285 return iter(self._sequences)
285 return iter(self._sequences)
286
286
287 def __len__(self):
287 def __len__(self):
288 return len(self._sequences)
288 return len(self._sequences)
289
289
290 def __nonzero__(self):
290 def __nonzero__(self):
291 return bool(self._sequences)
291 return bool(self._sequences)
292
292
293 __bool__ = __nonzero__
293 __bool__ = __nonzero__
294
294
295
295
296 class bundleoperation(object):
296 class bundleoperation(object):
297 """an object that represents a single bundling process
297 """an object that represents a single bundling process
298
298
299 Its purpose is to carry unbundle-related objects and states.
299 Its purpose is to carry unbundle-related objects and states.
300
300
301 A new object should be created at the beginning of each bundle processing.
301 A new object should be created at the beginning of each bundle processing.
302 The object is to be returned by the processing function.
302 The object is to be returned by the processing function.
303
303
304 The object has very little content now it will ultimately contain:
304 The object has very little content now it will ultimately contain:
305 * an access to the repo the bundle is applied to,
305 * an access to the repo the bundle is applied to,
306 * a ui object,
306 * a ui object,
307 * a way to retrieve a transaction to add changes to the repo,
307 * a way to retrieve a transaction to add changes to the repo,
308 * a way to record the result of processing each part,
308 * a way to record the result of processing each part,
309 * a way to construct a bundle response when applicable.
309 * a way to construct a bundle response when applicable.
310 """
310 """
311
311
312 def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
312 def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
313 self.repo = repo
313 self.repo = repo
314 self.ui = repo.ui
314 self.ui = repo.ui
315 self.records = unbundlerecords()
315 self.records = unbundlerecords()
316 self.reply = None
316 self.reply = None
317 self.captureoutput = captureoutput
317 self.captureoutput = captureoutput
318 self.hookargs = {}
318 self.hookargs = {}
319 self._gettransaction = transactiongetter
319 self._gettransaction = transactiongetter
320 # carries value that can modify part behavior
320 # carries value that can modify part behavior
321 self.modes = {}
321 self.modes = {}
322 self.source = source
322 self.source = source
323
323
324 def gettransaction(self):
324 def gettransaction(self):
325 transaction = self._gettransaction()
325 transaction = self._gettransaction()
326
326
327 if self.hookargs:
327 if self.hookargs:
328 # the ones added to the transaction supercede those added
328 # the ones added to the transaction supercede those added
329 # to the operation.
329 # to the operation.
330 self.hookargs.update(transaction.hookargs)
330 self.hookargs.update(transaction.hookargs)
331 transaction.hookargs = self.hookargs
331 transaction.hookargs = self.hookargs
332
332
333 # mark the hookargs as flushed. further attempts to add to
333 # mark the hookargs as flushed. further attempts to add to
334 # hookargs will result in an abort.
334 # hookargs will result in an abort.
335 self.hookargs = None
335 self.hookargs = None
336
336
337 return transaction
337 return transaction
338
338
339 def addhookargs(self, hookargs):
339 def addhookargs(self, hookargs):
340 if self.hookargs is None:
340 if self.hookargs is None:
341 raise error.ProgrammingError(
341 raise error.ProgrammingError(
342 b'attempted to add hookargs to '
342 b'attempted to add hookargs to '
343 b'operation after transaction started'
343 b'operation after transaction started'
344 )
344 )
345 self.hookargs.update(hookargs)
345 self.hookargs.update(hookargs)
346
346
347
347
348 class TransactionUnavailable(RuntimeError):
348 class TransactionUnavailable(RuntimeError):
349 pass
349 pass
350
350
351
351
352 def _notransaction():
352 def _notransaction():
353 """default method to get a transaction while processing a bundle
353 """default method to get a transaction while processing a bundle
354
354
355 Raise an exception to highlight the fact that no transaction was expected
355 Raise an exception to highlight the fact that no transaction was expected
356 to be created"""
356 to be created"""
357 raise TransactionUnavailable()
357 raise TransactionUnavailable()
358
358
359
359
360 def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
360 def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
361 # transform me into unbundler.apply() as soon as the freeze is lifted
361 # transform me into unbundler.apply() as soon as the freeze is lifted
362 if isinstance(unbundler, unbundle20):
362 if isinstance(unbundler, unbundle20):
363 tr.hookargs[b'bundle2'] = b'1'
363 tr.hookargs[b'bundle2'] = b'1'
364 if source is not None and b'source' not in tr.hookargs:
364 if source is not None and b'source' not in tr.hookargs:
365 tr.hookargs[b'source'] = source
365 tr.hookargs[b'source'] = source
366 if url is not None and b'url' not in tr.hookargs:
366 if url is not None and b'url' not in tr.hookargs:
367 tr.hookargs[b'url'] = url
367 tr.hookargs[b'url'] = url
368 return processbundle(repo, unbundler, lambda: tr, source=source)
368 return processbundle(repo, unbundler, lambda: tr, source=source)
369 else:
369 else:
370 # the transactiongetter won't be used, but we might as well set it
370 # the transactiongetter won't be used, but we might as well set it
371 op = bundleoperation(repo, lambda: tr, source=source)
371 op = bundleoperation(repo, lambda: tr, source=source)
372 _processchangegroup(op, unbundler, tr, source, url, **kwargs)
372 _processchangegroup(op, unbundler, tr, source, url, **kwargs)
373 return op
373 return op
374
374
375
375
376 class partiterator(object):
376 class partiterator(object):
377 def __init__(self, repo, op, unbundler):
377 def __init__(self, repo, op, unbundler):
378 self.repo = repo
378 self.repo = repo
379 self.op = op
379 self.op = op
380 self.unbundler = unbundler
380 self.unbundler = unbundler
381 self.iterator = None
381 self.iterator = None
382 self.count = 0
382 self.count = 0
383 self.current = None
383 self.current = None
384
384
385 def __enter__(self):
385 def __enter__(self):
386 def func():
386 def func():
387 itr = enumerate(self.unbundler.iterparts(), 1)
387 itr = enumerate(self.unbundler.iterparts(), 1)
388 for count, p in itr:
388 for count, p in itr:
389 self.count = count
389 self.count = count
390 self.current = p
390 self.current = p
391 yield p
391 yield p
392 p.consume()
392 p.consume()
393 self.current = None
393 self.current = None
394
394
395 self.iterator = func()
395 self.iterator = func()
396 return self.iterator
396 return self.iterator
397
397
398 def __exit__(self, type, exc, tb):
398 def __exit__(self, type, exc, tb):
399 if not self.iterator:
399 if not self.iterator:
400 return
400 return
401
401
402 # Only gracefully abort in a normal exception situation. User aborts
402 # Only gracefully abort in a normal exception situation. User aborts
403 # like Ctrl+C throw a KeyboardInterrupt which is not a base Exception,
403 # like Ctrl+C throw a KeyboardInterrupt which is not a base Exception,
404 # and should not gracefully cleanup.
404 # and should not gracefully cleanup.
405 if isinstance(exc, Exception):
405 if isinstance(exc, Exception):
406 # Any exceptions seeking to the end of the bundle at this point are
406 # Any exceptions seeking to the end of the bundle at this point are
407 # almost certainly related to the underlying stream being bad.
407 # almost certainly related to the underlying stream being bad.
408 # And, chances are that the exception we're handling is related to
408 # And, chances are that the exception we're handling is related to
409 # getting in that bad state. So, we swallow the seeking error and
409 # getting in that bad state. So, we swallow the seeking error and
410 # re-raise the original error.
410 # re-raise the original error.
411 seekerror = False
411 seekerror = False
412 try:
412 try:
413 if self.current:
413 if self.current:
414 # consume the part content to not corrupt the stream.
414 # consume the part content to not corrupt the stream.
415 self.current.consume()
415 self.current.consume()
416
416
417 for part in self.iterator:
417 for part in self.iterator:
418 # consume the bundle content
418 # consume the bundle content
419 part.consume()
419 part.consume()
420 except Exception:
420 except Exception:
421 seekerror = True
421 seekerror = True
422
422
423 # Small hack to let caller code distinguish exceptions from bundle2
423 # Small hack to let caller code distinguish exceptions from bundle2
424 # processing from processing the old format. This is mostly needed
424 # processing from processing the old format. This is mostly needed
425 # to handle different return codes to unbundle according to the type
425 # to handle different return codes to unbundle according to the type
426 # of bundle. We should probably clean up or drop this return code
426 # of bundle. We should probably clean up or drop this return code
427 # craziness in a future version.
427 # craziness in a future version.
428 exc.duringunbundle2 = True
428 exc.duringunbundle2 = True
429 salvaged = []
429 salvaged = []
430 replycaps = None
430 replycaps = None
431 if self.op.reply is not None:
431 if self.op.reply is not None:
432 salvaged = self.op.reply.salvageoutput()
432 salvaged = self.op.reply.salvageoutput()
433 replycaps = self.op.reply.capabilities
433 replycaps = self.op.reply.capabilities
434 exc._replycaps = replycaps
434 exc._replycaps = replycaps
435 exc._bundle2salvagedoutput = salvaged
435 exc._bundle2salvagedoutput = salvaged
436
436
437 # Re-raising from a variable loses the original stack. So only use
437 # Re-raising from a variable loses the original stack. So only use
438 # that form if we need to.
438 # that form if we need to.
439 if seekerror:
439 if seekerror:
440 raise exc
440 raise exc
441
441
442 self.repo.ui.debug(
442 self.repo.ui.debug(
443 b'bundle2-input-bundle: %i parts total\n' % self.count
443 b'bundle2-input-bundle: %i parts total\n' % self.count
444 )
444 )
445
445
446
446
447 def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
447 def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
448 """This function process a bundle, apply effect to/from a repo
448 """This function process a bundle, apply effect to/from a repo
449
449
450 It iterates over each part then searches for and uses the proper handling
450 It iterates over each part then searches for and uses the proper handling
451 code to process the part. Parts are processed in order.
451 code to process the part. Parts are processed in order.
452
452
453 Unknown Mandatory part will abort the process.
453 Unknown Mandatory part will abort the process.
454
454
455 It is temporarily possible to provide a prebuilt bundleoperation to the
455 It is temporarily possible to provide a prebuilt bundleoperation to the
456 function. This is used to ensure output is properly propagated in case of
456 function. This is used to ensure output is properly propagated in case of
457 an error during the unbundling. This output capturing part will likely be
457 an error during the unbundling. This output capturing part will likely be
458 reworked and this ability will probably go away in the process.
458 reworked and this ability will probably go away in the process.
459 """
459 """
460 if op is None:
460 if op is None:
461 if transactiongetter is None:
461 if transactiongetter is None:
462 transactiongetter = _notransaction
462 transactiongetter = _notransaction
463 op = bundleoperation(repo, transactiongetter, source=source)
463 op = bundleoperation(repo, transactiongetter, source=source)
464 # todo:
464 # todo:
465 # - replace this is a init function soon.
465 # - replace this is a init function soon.
466 # - exception catching
466 # - exception catching
467 unbundler.params
467 unbundler.params
468 if repo.ui.debugflag:
468 if repo.ui.debugflag:
469 msg = [b'bundle2-input-bundle:']
469 msg = [b'bundle2-input-bundle:']
470 if unbundler.params:
470 if unbundler.params:
471 msg.append(b' %i params' % len(unbundler.params))
471 msg.append(b' %i params' % len(unbundler.params))
472 if op._gettransaction is None or op._gettransaction is _notransaction:
472 if op._gettransaction is None or op._gettransaction is _notransaction:
473 msg.append(b' no-transaction')
473 msg.append(b' no-transaction')
474 else:
474 else:
475 msg.append(b' with-transaction')
475 msg.append(b' with-transaction')
476 msg.append(b'\n')
476 msg.append(b'\n')
477 repo.ui.debug(b''.join(msg))
477 repo.ui.debug(b''.join(msg))
478
478
479 processparts(repo, op, unbundler)
479 processparts(repo, op, unbundler)
480
480
481 return op
481 return op
482
482
483
483
484 def processparts(repo, op, unbundler):
484 def processparts(repo, op, unbundler):
485 with partiterator(repo, op, unbundler) as parts:
485 with partiterator(repo, op, unbundler) as parts:
486 for part in parts:
486 for part in parts:
487 _processpart(op, part)
487 _processpart(op, part)
488
488
489
489
490 def _processchangegroup(op, cg, tr, source, url, **kwargs):
490 def _processchangegroup(op, cg, tr, source, url, **kwargs):
491 ret = cg.apply(op.repo, tr, source, url, **kwargs)
491 ret = cg.apply(op.repo, tr, source, url, **kwargs)
492 op.records.add(b'changegroup', {b'return': ret,})
492 op.records.add(b'changegroup', {b'return': ret,})
493 return ret
493 return ret
494
494
495
495
496 def _gethandler(op, part):
496 def _gethandler(op, part):
497 status = b'unknown' # used by debug output
497 status = b'unknown' # used by debug output
498 try:
498 try:
499 handler = parthandlermapping.get(part.type)
499 handler = parthandlermapping.get(part.type)
500 if handler is None:
500 if handler is None:
501 status = b'unsupported-type'
501 status = b'unsupported-type'
502 raise error.BundleUnknownFeatureError(parttype=part.type)
502 raise error.BundleUnknownFeatureError(parttype=part.type)
503 indebug(op.ui, b'found a handler for part %s' % part.type)
503 indebug(op.ui, b'found a handler for part %s' % part.type)
504 unknownparams = part.mandatorykeys - handler.params
504 unknownparams = part.mandatorykeys - handler.params
505 if unknownparams:
505 if unknownparams:
506 unknownparams = list(unknownparams)
506 unknownparams = list(unknownparams)
507 unknownparams.sort()
507 unknownparams.sort()
508 status = b'unsupported-params (%s)' % b', '.join(unknownparams)
508 status = b'unsupported-params (%s)' % b', '.join(unknownparams)
509 raise error.BundleUnknownFeatureError(
509 raise error.BundleUnknownFeatureError(
510 parttype=part.type, params=unknownparams
510 parttype=part.type, params=unknownparams
511 )
511 )
512 status = b'supported'
512 status = b'supported'
513 except error.BundleUnknownFeatureError as exc:
513 except error.BundleUnknownFeatureError as exc:
514 if part.mandatory: # mandatory parts
514 if part.mandatory: # mandatory parts
515 raise
515 raise
516 indebug(op.ui, b'ignoring unsupported advisory part %s' % exc)
516 indebug(op.ui, b'ignoring unsupported advisory part %s' % exc)
517 return # skip to part processing
517 return # skip to part processing
518 finally:
518 finally:
519 if op.ui.debugflag:
519 if op.ui.debugflag:
520 msg = [b'bundle2-input-part: "%s"' % part.type]
520 msg = [b'bundle2-input-part: "%s"' % part.type]
521 if not part.mandatory:
521 if not part.mandatory:
522 msg.append(b' (advisory)')
522 msg.append(b' (advisory)')
523 nbmp = len(part.mandatorykeys)
523 nbmp = len(part.mandatorykeys)
524 nbap = len(part.params) - nbmp
524 nbap = len(part.params) - nbmp
525 if nbmp or nbap:
525 if nbmp or nbap:
526 msg.append(b' (params:')
526 msg.append(b' (params:')
527 if nbmp:
527 if nbmp:
528 msg.append(b' %i mandatory' % nbmp)
528 msg.append(b' %i mandatory' % nbmp)
529 if nbap:
529 if nbap:
530 msg.append(b' %i advisory' % nbmp)
530 msg.append(b' %i advisory' % nbmp)
531 msg.append(b')')
531 msg.append(b')')
532 msg.append(b' %s\n' % status)
532 msg.append(b' %s\n' % status)
533 op.ui.debug(b''.join(msg))
533 op.ui.debug(b''.join(msg))
534
534
535 return handler
535 return handler
536
536
537
537
538 def _processpart(op, part):
538 def _processpart(op, part):
539 """process a single part from a bundle
539 """process a single part from a bundle
540
540
541 The part is guaranteed to have been fully consumed when the function exits
541 The part is guaranteed to have been fully consumed when the function exits
542 (even if an exception is raised)."""
542 (even if an exception is raised)."""
543 handler = _gethandler(op, part)
543 handler = _gethandler(op, part)
544 if handler is None:
544 if handler is None:
545 return
545 return
546
546
547 # handler is called outside the above try block so that we don't
547 # handler is called outside the above try block so that we don't
548 # risk catching KeyErrors from anything other than the
548 # risk catching KeyErrors from anything other than the
549 # parthandlermapping lookup (any KeyError raised by handler()
549 # parthandlermapping lookup (any KeyError raised by handler()
550 # itself represents a defect of a different variety).
550 # itself represents a defect of a different variety).
551 output = None
551 output = None
552 if op.captureoutput and op.reply is not None:
552 if op.captureoutput and op.reply is not None:
553 op.ui.pushbuffer(error=True, subproc=True)
553 op.ui.pushbuffer(error=True, subproc=True)
554 output = b''
554 output = b''
555 try:
555 try:
556 handler(op, part)
556 handler(op, part)
557 finally:
557 finally:
558 if output is not None:
558 if output is not None:
559 output = op.ui.popbuffer()
559 output = op.ui.popbuffer()
560 if output:
560 if output:
561 outpart = op.reply.newpart(b'output', data=output, mandatory=False)
561 outpart = op.reply.newpart(b'output', data=output, mandatory=False)
562 outpart.addparam(
562 outpart.addparam(
563 b'in-reply-to', pycompat.bytestr(part.id), mandatory=False
563 b'in-reply-to', pycompat.bytestr(part.id), mandatory=False
564 )
564 )
565
565
566
566
567 def decodecaps(blob):
567 def decodecaps(blob):
568 """decode a bundle2 caps bytes blob into a dictionary
568 """decode a bundle2 caps bytes blob into a dictionary
569
569
570 The blob is a list of capabilities (one per line)
570 The blob is a list of capabilities (one per line)
571 Capabilities may have values using a line of the form::
571 Capabilities may have values using a line of the form::
572
572
573 capability=value1,value2,value3
573 capability=value1,value2,value3
574
574
575 The values are always a list."""
575 The values are always a list."""
576 caps = {}
576 caps = {}
577 for line in blob.splitlines():
577 for line in blob.splitlines():
578 if not line:
578 if not line:
579 continue
579 continue
580 if b'=' not in line:
580 if b'=' not in line:
581 key, vals = line, ()
581 key, vals = line, ()
582 else:
582 else:
583 key, vals = line.split(b'=', 1)
583 key, vals = line.split(b'=', 1)
584 vals = vals.split(b',')
584 vals = vals.split(b',')
585 key = urlreq.unquote(key)
585 key = urlreq.unquote(key)
586 vals = [urlreq.unquote(v) for v in vals]
586 vals = [urlreq.unquote(v) for v in vals]
587 caps[key] = vals
587 caps[key] = vals
588 return caps
588 return caps
589
589
590
590
591 def encodecaps(caps):
591 def encodecaps(caps):
592 """encode a bundle2 caps dictionary into a bytes blob"""
592 """encode a bundle2 caps dictionary into a bytes blob"""
593 chunks = []
593 chunks = []
594 for ca in sorted(caps):
594 for ca in sorted(caps):
595 vals = caps[ca]
595 vals = caps[ca]
596 ca = urlreq.quote(ca)
596 ca = urlreq.quote(ca)
597 vals = [urlreq.quote(v) for v in vals]
597 vals = [urlreq.quote(v) for v in vals]
598 if vals:
598 if vals:
599 ca = b"%s=%s" % (ca, b','.join(vals))
599 ca = b"%s=%s" % (ca, b','.join(vals))
600 chunks.append(ca)
600 chunks.append(ca)
601 return b'\n'.join(chunks)
601 return b'\n'.join(chunks)
602
602
603
603
604 bundletypes = {
604 bundletypes = {
605 b"": (b"", b'UN'), # only when using unbundle on ssh and old http servers
605 b"": (b"", b'UN'), # only when using unbundle on ssh and old http servers
606 # since the unification ssh accepts a header but there
606 # since the unification ssh accepts a header but there
607 # is no capability signaling it.
607 # is no capability signaling it.
608 b"HG20": (), # special-cased below
608 b"HG20": (), # special-cased below
609 b"HG10UN": (b"HG10UN", b'UN'),
609 b"HG10UN": (b"HG10UN", b'UN'),
610 b"HG10BZ": (b"HG10", b'BZ'),
610 b"HG10BZ": (b"HG10", b'BZ'),
611 b"HG10GZ": (b"HG10GZ", b'GZ'),
611 b"HG10GZ": (b"HG10GZ", b'GZ'),
612 }
612 }
613
613
614 # hgweb uses this list to communicate its preferred type
614 # hgweb uses this list to communicate its preferred type
615 bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN']
615 bundlepriority = [b'HG10GZ', b'HG10BZ', b'HG10UN']
616
616
617
617
class bundle20(object):
    """represent an outgoing bundle2 container

    Use the `addparam` method to add stream level parameter. and `newpart` to
    populate it. Then call `getchunks` to retrieve all the binary chunks of
    data that compose the bundle2 container."""

    # magic bytes + format version emitted at the very start of the stream
    _magicstring = b'HG20'

    def __init__(self, ui, capabilities=()):
        self.ui = ui
        self._params = []  # stream-level (name, value) pairs, in order added
        self._parts = []  # bundlepart instances, in emission order
        self.capabilities = dict(capabilities)
        # default to the identity engine; setcompression() may replace it
        self._compengine = util.compengines.forbundletype(b'UN')
        self._compopts = None
        # If compression is being handled by a consumer of the raw
        # data (e.g. the wire protocol), unsetting this flag tells
        # consumers that the bundle is best left uncompressed.
        self.prefercompressed = True

    def setcompression(self, alg, compopts=None):
        """setup core part compression to <alg>"""
        if alg in (None, b'UN'):
            return
        # compression may only be configured once per bundle
        assert not any(n.lower() == b'compression' for n, v in self._params)
        self.addparam(b'Compression', alg)
        self._compengine = util.compengines.forbundletype(alg)
        self._compopts = compopts

    @property
    def nbparts(self):
        """total number of parts added to the bundler"""
        return len(self._parts)

    # methods used to defines the bundle2 content
    def addparam(self, name, value=None):
        """add a stream level parameter

        The name must be non-empty and start with an ASCII letter (case
        encodes whether the parameter is mandatory for the receiver)."""
        if not name:
            raise error.ProgrammingError(b'empty parameter name')
        if name[0:1] not in pycompat.bytestr(
            string.ascii_letters  # pytype: disable=wrong-arg-types
        ):
            raise error.ProgrammingError(
                b'non letter first character: %s' % name
            )
        self._params.append((name, value))

    def addpart(self, part):
        """add a new part to the bundle2 container

        Parts contains the actual applicative payload."""
        # a part may belong to a single bundle; the id doubles as a counter
        assert part.id is None
        part.id = len(self._parts)  # very cheap counter
        self._parts.append(part)

    def newpart(self, typeid, *args, **kwargs):
        """create a new part and add it to the containers

        As the part is directly added to the containers. For now, this means
        that any failure to properly initialize the part after calling
        ``newpart`` should result in a failure of the whole bundling process.

        You can still fall back to manually create and add if you need better
        control."""
        part = bundlepart(typeid, *args, **kwargs)
        self.addpart(part)
        return part

    # methods used to generate the bundle2 stream
    def getchunks(self):
        """yield the binary chunks composing the full bundle2 stream"""
        if self.ui.debugflag:
            msg = [b'bundle2-output-bundle: "%s",' % self._magicstring]
            if self._params:
                msg.append(b' (%i params)' % len(self._params))
            msg.append(b' %i parts total\n' % len(self._parts))
            self.ui.debug(b''.join(msg))
        outdebug(self.ui, b'start emission of %s stream' % self._magicstring)
        yield self._magicstring
        param = self._paramchunk()
        outdebug(self.ui, b'bundle parameter: %s' % param)
        yield _pack(_fstreamparamsize, len(param))
        if param:
            yield param
        # everything after the stream parameter block may be compressed
        for chunk in self._compengine.compressstream(
            self._getcorechunk(), self._compopts
        ):
            yield chunk

    def _paramchunk(self):
        """return a encoded version of all stream parameters

        Parameters are url-quoted and space separated; valued ones use
        ``name=value`` form."""
        blocks = []
        for par, value in self._params:
            par = urlreq.quote(par)
            if value is not None:
                value = urlreq.quote(value)
                par = b'%s=%s' % (par, value)
            blocks.append(par)
        return b' '.join(blocks)

    def _getcorechunk(self):
        """yield chunk for the core part of the bundle

        (all but headers and parameters)"""
        outdebug(self.ui, b'start of parts')
        for part in self._parts:
            outdebug(self.ui, b'bundle part: "%s"' % part.type)
            for chunk in part.getchunks(ui=self.ui):
                yield chunk
        outdebug(self.ui, b'end of bundle')
        # a zero-sized part header marks the end of the bundle
        yield _pack(_fpartheadersize, 0)

    def salvageoutput(self):
        """return a list with a copy of all output parts in the bundle

        This is meant to be used during error handling to make sure we preserve
        server output"""
        salvaged = []
        for part in self._parts:
            if part.type.startswith(b'output'):
                salvaged.append(part.copy())
        return salvaged
740
740
741
741
class unpackermixin(object):
    """A mixin to extract bytes and struct data from a stream"""

    def __init__(self, fp):
        self._fp = fp

    def _unpack(self, format):
        """unpack this struct format from the stream

        This method is meant for internal usage by the bundle2 protocol only.
        They directly manipulate the low level stream including bundle2 level
        instruction.

        Do not use it to implement higher-level logic or methods."""
        wanted = struct.calcsize(format)
        raw = self._readexact(wanted)
        return _unpack(format, raw)

    def _readexact(self, size):
        """read exactly <size> bytes from the stream

        This method is meant for internal usage by the bundle2 protocol only.
        They directly manipulate the low level stream including bundle2 level
        instruction.

        Do not use it to implement higher-level logic or methods."""
        return changegroup.readexactly(self._fp, size)
768
768
769
769
def getunbundler(ui, fp, magicstring=None):
    """return a valid unbundler object for a given magicstring"""
    # read the magic off the stream unless the caller already did
    if magicstring is None:
        magicstring = changegroup.readexactly(fp, 4)
    magic = magicstring[0:2]
    version = magicstring[2:4]
    # guard clause: anything that is not an HG bundle is rejected outright
    if magic != b'HG':
        ui.debug(
            b"error: invalid magic: %r (version %r), should be 'HG'\n"
            % (magic, version)
        )
        raise error.Abort(_(b'not a Mercurial bundle'))
    try:
        unbundlerclass = formatmap[version]
    except KeyError:
        raise error.Abort(_(b'unknown bundle version %s') % version)
    unbundler = unbundlerclass(ui, fp)
    indebug(ui, b'start processing of %s stream' % magicstring)
    return unbundler
787
787
788
788
class unbundle20(unpackermixin):
    """interpret a bundle2 stream

    This class is fed with a binary stream and yields parts through its
    `iterparts` methods."""

    # magic bytes + format version expected at the start of the stream
    _magicstring = b'HG20'

    def __init__(self, ui, fp):
        """If header is specified, we do not read it out of the stream."""
        self.ui = ui
        # decompression engine; replaced when a 'compression' stream
        # parameter is processed (see processcompression below)
        self._compengine = util.compengines.forbundletype(b'UN')
        self._compressed = None
        super(unbundle20, self).__init__(fp)

    @util.propertycache
    def params(self):
        """dictionary of stream level parameters"""
        indebug(self.ui, b'reading bundle2 stream parameters')
        params = {}
        paramssize = self._unpack(_fstreamparamsize)[0]
        if paramssize < 0:
            raise error.BundleValueError(
                b'negative bundle param size: %i' % paramssize
            )
        if paramssize:
            params = self._readexact(paramssize)
            params = self._processallparams(params)
        return params

    def _processallparams(self, paramsblock):
        """parse a raw parameter block, apply each parameter's effect and
        return them as an ordered name -> value mapping"""
        params = util.sortdict()
        for p in paramsblock.split(b' '):
            p = p.split(b'=', 1)
            p = [urlreq.unquote(i) for i in p]
            if len(p) < 2:
                # parameter without a value
                p.append(None)
            self._processparam(*p)
            params[p[0]] = p[1]
        return params

    def _processparam(self, name, value):
        """process a parameter, applying its effect if needed

        Parameter starting with a lower case letter are advisory and will be
        ignored when unknown. Those starting with an upper case letter are
        mandatory and will this function will raise a KeyError when unknown.

        Note: no option are currently supported. Any input will be either
        ignored or failing.
        """
        if not name:
            raise ValueError('empty parameter name')
        if name[0:1] not in pycompat.bytestr(
            string.ascii_letters  # pytype: disable=wrong-arg-types
        ):
            raise ValueError('non letter first character: %s' % name)
        try:
            handler = b2streamparamsmap[name.lower()]
        except KeyError:
            if name[0:1].islower():
                # advisory parameter: safe to ignore when unknown
                indebug(self.ui, b"ignoring unknown parameter %s" % name)
            else:
                raise error.BundleUnknownFeatureError(params=(name,))
        else:
            handler(self, name, value)

    def _forwardchunks(self):
        """utility to transfer a bundle2 as binary

        This is made necessary by the fact the 'getbundle' command over 'ssh'
        have no way to know then the reply end, relying on the bundle to be
        interpreted to know its end. This is terrible and we are sorry, but we
        needed to move forward to get general delta enabled.
        """
        yield self._magicstring
        # the params property must not have been consumed yet, since we
        # re-read the parameter block directly off the stream here
        assert 'params' not in vars(self)
        paramssize = self._unpack(_fstreamparamsize)[0]
        if paramssize < 0:
            raise error.BundleValueError(
                b'negative bundle param size: %i' % paramssize
            )
        if paramssize:
            params = self._readexact(paramssize)
            self._processallparams(params)
            # The payload itself is decompressed below, so drop
            # the compression parameter passed down to compensate.
            outparams = []
            for p in params.split(b' '):
                k, v = p.split(b'=', 1)
                if k.lower() != b'compression':
                    outparams.append(p)
            outparams = b' '.join(outparams)
            yield _pack(_fstreamparamsize, len(outparams))
            yield outparams
        else:
            yield _pack(_fstreamparamsize, paramssize)
        # From there, payload might need to be decompressed
        self._fp = self._compengine.decompressorreader(self._fp)
        emptycount = 0
        # two consecutive empty chunks (part-end + bundle-end) terminate
        while emptycount < 2:
            # so we can brainlessly loop
            assert _fpartheadersize == _fpayloadsize
            size = self._unpack(_fpartheadersize)[0]
            yield _pack(_fpartheadersize, size)
            if size:
                emptycount = 0
            else:
                emptycount += 1
                continue
            if size == flaginterrupt:
                continue
            elif size < 0:
                # BUG FIX: the size was never interpolated into the message,
                # leaving a literal '%i' in the error text
                raise error.BundleValueError(b'negative chunk size: %i' % size)
            yield self._readexact(size)

    def iterparts(self, seekable=False):
        """yield all parts contained in the stream"""
        cls = seekableunbundlepart if seekable else unbundlepart
        # make sure param have been loaded
        self.params
        # From there, payload need to be decompressed
        self._fp = self._compengine.decompressorreader(self._fp)
        indebug(self.ui, b'start extraction of bundle2 parts')
        headerblock = self._readpartheader()
        while headerblock is not None:
            part = cls(self.ui, headerblock, self._fp)
            yield part
            # Ensure part is fully consumed so we can start reading the next
            # part.
            part.consume()

            headerblock = self._readpartheader()
        indebug(self.ui, b'end of bundle2 stream')

    def _readpartheader(self):
        """reads a part header size and return the bytes blob

        returns None if empty"""
        headersize = self._unpack(_fpartheadersize)[0]
        if headersize < 0:
            raise error.BundleValueError(
                b'negative part header size: %i' % headersize
            )
        indebug(self.ui, b'part header size: %i' % headersize)
        if headersize:
            return self._readexact(headersize)
        return None

    def compressed(self):
        """return whether the stream declared a compression parameter"""
        self.params  # load params
        return self._compressed

    def close(self):
        """close underlying file"""
        if util.safehasattr(self._fp, 'close'):
            return self._fp.close()
947
947
948
948
# map of recognized bundle2 stream versions to their unbundler class
formatmap = {b'20': unbundle20}

# registry of stream-level parameter handlers, keyed by lowercased
# parameter name (populated via the b2streamparamhandler decorator)
b2streamparamsmap = {}
952
952
953
953
def b2streamparamhandler(name):
    """register a handler for a stream level parameter

    Returns a decorator that records ``func`` in ``b2streamparamsmap``
    under ``name``. Registering the same parameter twice is a
    programming error."""

    def decorator(func):
        # BUG FIX: the duplicate-registration guard checked ``formatmap``
        # (the bundle *version* map) instead of the parameter-handler map,
        # so registering two handlers for the same stream parameter went
        # undetected and silently overwrote the first one.
        assert name not in b2streamparamsmap
        b2streamparamsmap[name] = func
        return func

    return decorator
963
963
964
964
@b2streamparamhandler(b'compression')
def processcompression(unbundler, param, value):
    """read compression parameter and install payload decompression"""
    if value not in util.compengines.supportedbundletypes:
        raise error.BundleUnknownFeatureError(params=(param,), values=(value,))
    unbundler._compengine = util.compengines.forbundletype(value)
    if value is not None:
        # NOTE(review): any non-None value reaching this point (including
        # b'UN', which passes the supported-types check above) marks the
        # stream as compressed -- confirm that is intended.
        unbundler._compressed = True
973
973
974
974
975 class bundlepart(object):
975 class bundlepart(object):
976 """A bundle2 part contains application level payload
976 """A bundle2 part contains application level payload
977
977
978 The part `type` is used to route the part to the application level
978 The part `type` is used to route the part to the application level
979 handler.
979 handler.
980
980
981 The part payload is contained in ``part.data``. It could be raw bytes or a
981 The part payload is contained in ``part.data``. It could be raw bytes or a
982 generator of byte chunks.
982 generator of byte chunks.
983
983
984 You can add parameters to the part using the ``addparam`` method.
984 You can add parameters to the part using the ``addparam`` method.
985 Parameters can be either mandatory (default) or advisory. Remote side
985 Parameters can be either mandatory (default) or advisory. Remote side
986 should be able to safely ignore the advisory ones.
986 should be able to safely ignore the advisory ones.
987
987
988 Both data and parameters cannot be modified after the generation has begun.
988 Both data and parameters cannot be modified after the generation has begun.
989 """
989 """
990
990
    def __init__(
        self,
        parttype,
        mandatoryparams=(),
        advisoryparams=(),
        data=b'',
        mandatory=True,
    ):
        # reject syntactically invalid part type names up front
        validateparttype(parttype)
        self.id = None  # assigned by bundle20.addpart when added to a bundle
        self.type = parttype
        self._data = data
        self._mandatoryparams = list(mandatoryparams)
        self._advisoryparams = list(advisoryparams)
        # checking for duplicated entries
        self._seenparams = set()
        for pname, __ in self._mandatoryparams + self._advisoryparams:
            if pname in self._seenparams:
                raise error.ProgrammingError(b'duplicated params: %s' % pname)
            self._seenparams.add(pname)
        # status of the part's generation:
        # - None: not started,
        # - False: currently generated,
        # - True: generation done.
        self._generated = None
        self.mandatory = mandatory
1017
1017
1018 def __repr__(self):
1018 def __repr__(self):
1019 cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
1019 cls = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
1020 return '<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
1020 return '<%s object at %x; id: %s; type: %s; mandatory: %s>' % (
1021 cls,
1021 cls,
1022 id(self),
1022 id(self),
1023 self.id,
1023 self.id,
1024 self.type,
1024 self.type,
1025 self.mandatory,
1025 self.mandatory,
1026 )
1026 )
1027
1027
    def copy(self):
        """return a copy of the part

        The new part have the very same content but no partid assigned yet.
        Parts with generated data cannot be copied."""
        # a generator payload can only be consumed once, so it cannot be
        # duplicated safely
        assert not util.safehasattr(self.data, 'next')
        return self.__class__(
            self.type,
            self._mandatoryparams,
            self._advisoryparams,
            self._data,
            self.mandatory,
        )
1041
1041
    # methods used to defines the part content
    @property
    def data(self):
        # payload: raw bytes or a generator of byte chunks
        return self._data

    @data.setter
    def data(self, data):
        # the payload is frozen once stream generation has started
        if self._generated is not None:
            raise error.ReadOnlyPartError(b'part is being generated')
        self._data = data
1052
1052
    @property
    def mandatoryparams(self):
        # make it an immutable tuple to force people through ``addparam``
        return tuple(self._mandatoryparams)

    @property
    def advisoryparams(self):
        # make it an immutable tuple to force people through ``addparam``
        return tuple(self._advisoryparams)
1062
1062
1063 def addparam(self, name, value=b'', mandatory=True):
1063 def addparam(self, name, value=b'', mandatory=True):
1064 """add a parameter to the part
1064 """add a parameter to the part
1065
1065
1066 If 'mandatory' is set to True, the remote handler must claim support
1066 If 'mandatory' is set to True, the remote handler must claim support
1067 for this parameter or the unbundling will be aborted.
1067 for this parameter or the unbundling will be aborted.
1068
1068
1069 The 'name' and 'value' cannot exceed 255 bytes each.
1069 The 'name' and 'value' cannot exceed 255 bytes each.
1070 """
1070 """
1071 if self._generated is not None:
1071 if self._generated is not None:
1072 raise error.ReadOnlyPartError(b'part is being generated')
1072 raise error.ReadOnlyPartError(b'part is being generated')
1073 if name in self._seenparams:
1073 if name in self._seenparams:
1074 raise ValueError(b'duplicated params: %s' % name)
1074 raise ValueError(b'duplicated params: %s' % name)
1075 self._seenparams.add(name)
1075 self._seenparams.add(name)
1076 params = self._advisoryparams
1076 params = self._advisoryparams
1077 if mandatory:
1077 if mandatory:
1078 params = self._mandatoryparams
1078 params = self._mandatoryparams
1079 params.append((name, value))
1079 params.append((name, value))
1080
1080
1081 # methods used to generates the bundle2 stream
1081 # methods used to generates the bundle2 stream
1082 def getchunks(self, ui):
1082 def getchunks(self, ui):
1083 if self._generated is not None:
1083 if self._generated is not None:
1084 raise error.ProgrammingError(b'part can only be consumed once')
1084 raise error.ProgrammingError(b'part can only be consumed once')
1085 self._generated = False
1085 self._generated = False
1086
1086
1087 if ui.debugflag:
1087 if ui.debugflag:
1088 msg = [b'bundle2-output-part: "%s"' % self.type]
1088 msg = [b'bundle2-output-part: "%s"' % self.type]
1089 if not self.mandatory:
1089 if not self.mandatory:
1090 msg.append(b' (advisory)')
1090 msg.append(b' (advisory)')
1091 nbmp = len(self.mandatoryparams)
1091 nbmp = len(self.mandatoryparams)
1092 nbap = len(self.advisoryparams)
1092 nbap = len(self.advisoryparams)
1093 if nbmp or nbap:
1093 if nbmp or nbap:
1094 msg.append(b' (params:')
1094 msg.append(b' (params:')
1095 if nbmp:
1095 if nbmp:
1096 msg.append(b' %i mandatory' % nbmp)
1096 msg.append(b' %i mandatory' % nbmp)
1097 if nbap:
1097 if nbap:
1098 msg.append(b' %i advisory' % nbmp)
1098 msg.append(b' %i advisory' % nbmp)
1099 msg.append(b')')
1099 msg.append(b')')
1100 if not self.data:
1100 if not self.data:
1101 msg.append(b' empty payload')
1101 msg.append(b' empty payload')
1102 elif util.safehasattr(self.data, 'next') or util.safehasattr(
1102 elif util.safehasattr(self.data, 'next') or util.safehasattr(
1103 self.data, b'__next__'
1103 self.data, b'__next__'
1104 ):
1104 ):
1105 msg.append(b' streamed payload')
1105 msg.append(b' streamed payload')
1106 else:
1106 else:
1107 msg.append(b' %i bytes payload' % len(self.data))
1107 msg.append(b' %i bytes payload' % len(self.data))
1108 msg.append(b'\n')
1108 msg.append(b'\n')
1109 ui.debug(b''.join(msg))
1109 ui.debug(b''.join(msg))
1110
1110
1111 #### header
1111 #### header
1112 if self.mandatory:
1112 if self.mandatory:
1113 parttype = self.type.upper()
1113 parttype = self.type.upper()
1114 else:
1114 else:
1115 parttype = self.type.lower()
1115 parttype = self.type.lower()
1116 outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
1116 outdebug(ui, b'part %s: "%s"' % (pycompat.bytestr(self.id), parttype))
1117 ## parttype
1117 ## parttype
1118 header = [
1118 header = [
1119 _pack(_fparttypesize, len(parttype)),
1119 _pack(_fparttypesize, len(parttype)),
1120 parttype,
1120 parttype,
1121 _pack(_fpartid, self.id),
1121 _pack(_fpartid, self.id),
1122 ]
1122 ]
1123 ## parameters
1123 ## parameters
1124 # count
1124 # count
1125 manpar = self.mandatoryparams
1125 manpar = self.mandatoryparams
1126 advpar = self.advisoryparams
1126 advpar = self.advisoryparams
1127 header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
1127 header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
1128 # size
1128 # size
1129 parsizes = []
1129 parsizes = []
1130 for key, value in manpar:
1130 for key, value in manpar:
1131 parsizes.append(len(key))
1131 parsizes.append(len(key))
1132 parsizes.append(len(value))
1132 parsizes.append(len(value))
1133 for key, value in advpar:
1133 for key, value in advpar:
1134 parsizes.append(len(key))
1134 parsizes.append(len(key))
1135 parsizes.append(len(value))
1135 parsizes.append(len(value))
1136 paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes)
1136 paramsizes = _pack(_makefpartparamsizes(len(parsizes) // 2), *parsizes)
1137 header.append(paramsizes)
1137 header.append(paramsizes)
1138 # key, value
1138 # key, value
1139 for key, value in manpar:
1139 for key, value in manpar:
1140 header.append(key)
1140 header.append(key)
1141 header.append(value)
1141 header.append(value)
1142 for key, value in advpar:
1142 for key, value in advpar:
1143 header.append(key)
1143 header.append(key)
1144 header.append(value)
1144 header.append(value)
1145 ## finalize header
1145 ## finalize header
1146 try:
1146 try:
1147 headerchunk = b''.join(header)
1147 headerchunk = b''.join(header)
1148 except TypeError:
1148 except TypeError:
1149 raise TypeError(
1149 raise TypeError(
1150 'Found a non-bytes trying to '
1150 'Found a non-bytes trying to '
1151 'build bundle part header: %r' % header
1151 'build bundle part header: %r' % header
1152 )
1152 )
1153 outdebug(ui, b'header chunk size: %i' % len(headerchunk))
1153 outdebug(ui, b'header chunk size: %i' % len(headerchunk))
1154 yield _pack(_fpartheadersize, len(headerchunk))
1154 yield _pack(_fpartheadersize, len(headerchunk))
1155 yield headerchunk
1155 yield headerchunk
1156 ## payload
1156 ## payload
1157 try:
1157 try:
1158 for chunk in self._payloadchunks():
1158 for chunk in self._payloadchunks():
1159 outdebug(ui, b'payload chunk size: %i' % len(chunk))
1159 outdebug(ui, b'payload chunk size: %i' % len(chunk))
1160 yield _pack(_fpayloadsize, len(chunk))
1160 yield _pack(_fpayloadsize, len(chunk))
1161 yield chunk
1161 yield chunk
1162 except GeneratorExit:
1162 except GeneratorExit:
1163 # GeneratorExit means that nobody is listening for our
1163 # GeneratorExit means that nobody is listening for our
1164 # results anyway, so just bail quickly rather than trying
1164 # results anyway, so just bail quickly rather than trying
1165 # to produce an error part.
1165 # to produce an error part.
1166 ui.debug(b'bundle2-generatorexit\n')
1166 ui.debug(b'bundle2-generatorexit\n')
1167 raise
1167 raise
1168 except BaseException as exc:
1168 except BaseException as exc:
1169 bexc = stringutil.forcebytestr(exc)
1169 bexc = stringutil.forcebytestr(exc)
1170 # backup exception data for later
1170 # backup exception data for later
1171 ui.debug(
1171 ui.debug(
1172 b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
1172 b'bundle2-input-stream-interrupt: encoding exception %s' % bexc
1173 )
1173 )
1174 tb = sys.exc_info()[2]
1174 tb = sys.exc_info()[2]
1175 msg = b'unexpected error: %s' % bexc
1175 msg = b'unexpected error: %s' % bexc
1176 interpart = bundlepart(
1176 interpart = bundlepart(
1177 b'error:abort', [(b'message', msg)], mandatory=False
1177 b'error:abort', [(b'message', msg)], mandatory=False
1178 )
1178 )
1179 interpart.id = 0
1179 interpart.id = 0
1180 yield _pack(_fpayloadsize, -1)
1180 yield _pack(_fpayloadsize, -1)
1181 for chunk in interpart.getchunks(ui=ui):
1181 for chunk in interpart.getchunks(ui=ui):
1182 yield chunk
1182 yield chunk
1183 outdebug(ui, b'closing payload chunk')
1183 outdebug(ui, b'closing payload chunk')
1184 # abort current part payload
1184 # abort current part payload
1185 yield _pack(_fpayloadsize, 0)
1185 yield _pack(_fpayloadsize, 0)
1186 pycompat.raisewithtb(exc, tb)
1186 pycompat.raisewithtb(exc, tb)
1187 # end of payload
1187 # end of payload
1188 outdebug(ui, b'closing payload chunk')
1188 outdebug(ui, b'closing payload chunk')
1189 yield _pack(_fpayloadsize, 0)
1189 yield _pack(_fpayloadsize, 0)
1190 self._generated = True
1190 self._generated = True
1191
1191
1192 def _payloadchunks(self):
1192 def _payloadchunks(self):
1193 """yield chunks of a the part payload
1193 """yield chunks of a the part payload
1194
1194
1195 Exists to handle the different methods to provide data to a part."""
1195 Exists to handle the different methods to provide data to a part."""
1196 # we only support fixed size data now.
1196 # we only support fixed size data now.
1197 # This will be improved in the future.
1197 # This will be improved in the future.
1198 if util.safehasattr(self.data, 'next') or util.safehasattr(
1198 if util.safehasattr(self.data, 'next') or util.safehasattr(
1199 self.data, b'__next__'
1199 self.data, b'__next__'
1200 ):
1200 ):
1201 buff = util.chunkbuffer(self.data)
1201 buff = util.chunkbuffer(self.data)
1202 chunk = buff.read(preferedchunksize)
1202 chunk = buff.read(preferedchunksize)
1203 while chunk:
1203 while chunk:
1204 yield chunk
1204 yield chunk
1205 chunk = buff.read(preferedchunksize)
1205 chunk = buff.read(preferedchunksize)
1206 elif len(self.data):
1206 elif len(self.data):
1207 yield self.data
1207 yield self.data
1208
1208
1209
1209
# Sentinel payload-chunk "size": a chunk size equal to this value signals
# that the regular stream is interrupted and an out-of-band bundle2 part
# follows (see decodepayloadchunks / interrupthandler).
flaginterrupt = -1
1211
1211
1212
1212
class interrupthandler(unpackermixin):
    """Read a single part and process it with restricted capabilities.

    This allows to transmit an exception raised on the producer side during
    part iteration while the consumer is reading a part.

    Parts processed in this manner only have access to a ui object."""

    def __init__(self, ui, fp):
        super(interrupthandler, self).__init__(fp)
        self.ui = ui

    def _readpartheader(self):
        """Read a part header size, then return the header bytes blob.

        Returns None when the header is empty."""
        size = self._unpack(_fpartheadersize)[0]
        if size < 0:
            raise error.BundleValueError(
                b'negative part header size: %i' % size
            )
        indebug(self.ui, b'part header size: %i\n' % size)
        return self._readexact(size) if size else None

    def __call__(self):
        self.ui.debug(
            b'bundle2-input-stream-interrupt: opening out of band context\n'
        )
        indebug(self.ui, b'bundle2 stream interruption, looking for a part.')
        headerblock = self._readpartheader()
        if headerblock is None:
            indebug(self.ui, b'no part found during interruption.')
            return
        # Process the out-of-band part with an operation object that only
        # exposes a ui (no repo, no transaction).
        innerpart = unbundlepart(self.ui, headerblock, self._fp)
        limitedop = interruptoperation(self.ui)
        fatal = False
        try:
            _processpart(limitedop, innerpart)
        except (SystemExit, KeyboardInterrupt):
            # On hard abort, leave the stream alone and propagate.
            fatal = True
            raise
        finally:
            if not fatal:
                innerpart.consume()
        self.ui.debug(
            b'bundle2-input-stream-interrupt: closing out of band context\n'
        )
1263
1263
1264
1264
class interruptoperation(object):
    """Restricted bundle operation handed to part handlers during interruption.

    Only a ui object is available; any repository or transaction access
    raises.
    """

    def __init__(self, ui):
        self.ui = ui
        # No reply bundle and no output capture for out-of-band parts.
        self.captureoutput = False
        self.reply = None

    @property
    def repo(self):
        # Touching the repo from an interruption context is a coding error.
        raise error.ProgrammingError(b'no repo access from stream interruption')

    def gettransaction(self):
        raise TransactionUnavailable(b'no repo access from stream interruption')
1282
1282
1283
1283
def decodepayloadchunks(ui, fh):
    """Reads bundle2 part payload data into chunks.

    Part payload data consists of framed chunks: each frame is a fixed-size
    header carrying the chunk length, followed by that many bytes of data.
    This function takes a file handle and emits the decoded chunks.

    A zero chunk size terminates the payload. A chunk size equal to
    ``flaginterrupt`` means an out-of-band bundle2 part follows; it is
    consumed in place via ``interrupthandler``. Any other negative size is
    a protocol error.

    Raises ``error.Abort`` when the stream ends mid-frame and
    ``error.BundleValueError`` on an invalid negative chunk size.
    """
    dolog = ui.configbool(b'devel', b'bundle2.debug')
    debug = ui.debug

    headerstruct = struct.Struct(_fpayloadsize)
    headersize = headerstruct.size
    unpack = headerstruct.unpack

    readexactly = changegroup.readexactly
    read = fh.read

    chunksize = unpack(readexactly(fh, headersize))[0]
    indebug(ui, b'payload chunk size: %i' % chunksize)

    # changegroup.readexactly() is inlined below for performance.
    while chunksize:
        if chunksize >= 0:
            s = read(chunksize)
            if len(s) < chunksize:
                # Fixed: the two implicitly-concatenated literals used to
                # produce a doubled space ("unexpectedly  (got").
                raise error.Abort(
                    _(
                        b'stream ended unexpectedly '
                        b'(got %d bytes, expected %d)'
                    )
                    % (len(s), chunksize)
                )

            yield s
        elif chunksize == flaginterrupt:
            # Interrupt "signal" detected. The regular stream is interrupted
            # and a bundle2 part follows. Consume it.
            interrupthandler(ui, fh)()
        else:
            raise error.BundleValueError(
                b'negative payload chunk size: %s' % chunksize
            )

        s = read(headersize)
        if len(s) < headersize:
            raise error.Abort(
                _(b'stream ended unexpectedly (got %d bytes, expected %d)')
                # Fixed: we are reading a frame *header* here, so the
                # expected byte count is headersize, not the previous
                # chunksize.
                % (len(s), headersize)
            )

        chunksize = unpack(s)[0]

        # indebug() inlined for performance.
        if dolog:
            debug(b'bundle2-input: payload chunk size: %i\n' % chunksize)
1338
1338
1339
1339
class unbundlepart(unpackermixin):
    """a bundle part read from a bundle"""

    def __init__(self, ui, header, fp):
        super(unbundlepart, self).__init__(fp)
        # Whether the underlying stream supports random access.
        # NOTE(review): one attribute name is native str ('seek') and the
        # other bytes (b'tell') — presumably safehasattr normalizes; confirm.
        self._seekable = util.safehasattr(fp, 'seek') and util.safehasattr(
            fp, b'tell'
        )
        self.ui = ui
        # unbundle state attr
        self._headerdata = header
        self._headeroffset = 0
        self._initialized = False
        self.consumed = False
        # part data
        self.id = None
        self.type = None
        self.mandatoryparams = None
        self.advisoryparams = None
        self.params = None
        self.mandatorykeys = ()
        # Parse the header blob immediately; fills in the attributes above.
        self._readheader()
        self._mandatory = None
        # Number of decoded payload bytes read so far.
        self._pos = 0

    def _fromheader(self, size):
        """return the next <size> byte from the header"""
        offset = self._headeroffset
        data = self._headerdata[offset : (offset + size)]
        self._headeroffset = offset + size
        return data

    def _unpackheader(self, format):
        """read given format from header

        This automatically compute the size of the format to read."""
        data = self._fromheader(struct.calcsize(format))
        return _unpack(format, data)

    def _initparams(self, mandatoryparams, advisoryparams):
        """internal function to setup all logic related parameters"""
        # make it read only to prevent people touching it by mistake.
        self.mandatoryparams = tuple(mandatoryparams)
        self.advisoryparams = tuple(advisoryparams)
        # user friendly UI
        self.params = util.sortdict(self.mandatoryparams)
        self.params.update(self.advisoryparams)
        self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)

    def _readheader(self):
        """read the header and setup the object"""
        typesize = self._unpackheader(_fparttypesize)[0]
        self.type = self._fromheader(typesize)
        indebug(self.ui, b'part type: "%s"' % self.type)
        self.id = self._unpackheader(_fpartid)[0]
        indebug(self.ui, b'part id: "%s"' % pycompat.bytestr(self.id))
        # extract mandatory bit from type
        # (an upper-cased part type on the wire marks the part mandatory)
        self.mandatory = self.type != self.type.lower()
        self.type = self.type.lower()
        ## reading parameters
        # param count
        mancount, advcount = self._unpackheader(_fpartparamcount)
        indebug(self.ui, b'part parameters: %i' % (mancount + advcount))
        # param size
        fparamsizes = _makefpartparamsizes(mancount + advcount)
        paramsizes = self._unpackheader(fparamsizes)
        # make it a list of couple again
        # (sizes come as a flat sequence of key-size, value-size pairs)
        paramsizes = list(zip(paramsizes[::2], paramsizes[1::2]))
        # split mandatory from advisory
        mansizes = paramsizes[:mancount]
        advsizes = paramsizes[mancount:]
        # retrieve param value
        manparams = []
        for key, value in mansizes:
            manparams.append((self._fromheader(key), self._fromheader(value)))
        advparams = []
        for key, value in advsizes:
            advparams.append((self._fromheader(key), self._fromheader(value)))
        self._initparams(manparams, advparams)
        ## part payload
        self._payloadstream = util.chunkbuffer(self._payloadchunks())
        # we read the data, tell it
        self._initialized = True

    def _payloadchunks(self):
        """Generator of decoded chunks in the payload."""
        return decodepayloadchunks(self.ui, self._fp)

    def consume(self):
        """Read the part payload until completion.

        By consuming the part data, the underlying stream read offset will
        be advanced to the next part (or end of stream).
        """
        if self.consumed:
            return

        # Drain in fixed-size chunks; read() flips self.consumed at EOF.
        chunk = self.read(32768)
        while chunk:
            self._pos += len(chunk)
            chunk = self.read(32768)

    def read(self, size=None):
        """read payload data"""
        if not self._initialized:
            self._readheader()
        if size is None:
            data = self._payloadstream.read()
        else:
            data = self._payloadstream.read(size)
        self._pos += len(data)
        # A short read (or an unbounded read) means the payload is exhausted.
        if size is None or len(data) < size:
            if not self.consumed and self._pos:
                self.ui.debug(
                    b'bundle2-input-part: total payload size %i\n' % self._pos
                )
            self.consumed = True
        return data
1458
1458
1459
1459
class seekableunbundlepart(unbundlepart):
    """A bundle2 part in a bundle that is seekable.

    Regular ``unbundlepart`` instances can only be read once. This class
    extends ``unbundlepart`` to enable bi-directional seeking within the
    part.

    Bundle2 part data consists of framed chunks. Offsets when seeking
    refer to the decoded data, not the offsets in the underlying bundle2
    stream.

    To facilitate quickly seeking within the decoded data, instances of this
    class maintain a mapping between offsets in the underlying stream and
    the decoded payload. This mapping will consume memory in proportion
    to the number of chunks within the payload (which almost certainly
    increases in proportion with the size of the part).
    """

    def __init__(self, ui, header, fp):
        # (payload, file) offsets for chunk starts.
        self._chunkindex = []

        super(seekableunbundlepart, self).__init__(ui, header, fp)

    def _payloadchunks(self, chunknum=0):
        '''seek to specified chunk and start yielding data'''
        if len(self._chunkindex) == 0:
            assert chunknum == 0, b'Must start with chunk 0'
            self._chunkindex.append((0, self._tellfp()))
        else:
            assert chunknum < len(self._chunkindex), (
                b'Unknown chunk %d' % chunknum
            )
            self._seekfp(self._chunkindex[chunknum][1])

        pos = self._chunkindex[chunknum][0]

        for chunk in decodepayloadchunks(self.ui, self._fp):
            chunknum += 1
            pos += len(chunk)
            # Record each newly-seen chunk boundary exactly once.
            if chunknum == len(self._chunkindex):
                self._chunkindex.append((pos, self._tellfp()))

            yield chunk

    def _findchunk(self, pos):
        '''for a given payload position, return a chunk number and offset'''
        for chunk, (ppos, fpos) in enumerate(self._chunkindex):
            if ppos == pos:
                return chunk, 0
            elif ppos > pos:
                return chunk - 1, pos - self._chunkindex[chunk - 1][0]
        raise ValueError(b'Unknown chunk')

    def tell(self):
        return self._pos

    def seek(self, offset, whence=os.SEEK_SET):
        if whence == os.SEEK_SET:
            newpos = offset
        elif whence == os.SEEK_CUR:
            newpos = self._pos + offset
        elif whence == os.SEEK_END:
            if not self.consumed:
                # Can't use self.consume() here because it advances self._pos.
                chunk = self.read(32768)
                while chunk:
                    chunk = self.read(32768)
            newpos = self._chunkindex[-1][0] - offset
        else:
            raise ValueError(b'Unknown whence value: %r' % (whence,))

        if newpos > self._chunkindex[-1][0] and not self.consumed:
            # Can't use self.consume() here because it advances self._pos.
            chunk = self.read(32768)
            while chunk:
                # Fixed: this drain loop used 32668, an apparent typo for
                # the 32768 chunk size used everywhere else. Behavior is
                # identical; the chunking is now consistent.
                chunk = self.read(32768)

        if not 0 <= newpos <= self._chunkindex[-1][0]:
            raise ValueError(b'Offset out of range')

        if self._pos != newpos:
            chunk, internaloffset = self._findchunk(newpos)
            self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
            adjust = self.read(internaloffset)
            if len(adjust) != internaloffset:
                raise error.Abort(_(b'Seek failed\n'))
            self._pos = newpos

    def _seekfp(self, offset, whence=0):
        """move the underlying file pointer

        This method is meant for internal usage by the bundle2 protocol only.
        They directly manipulate the low level stream including bundle2 level
        instruction.

        Do not use it to implement higher-level logic or methods."""
        if self._seekable:
            return self._fp.seek(offset, whence)
        else:
            raise NotImplementedError(_(b'File pointer is not seekable'))

    def _tellfp(self):
        """return the file offset, or None if file is not seekable

        This method is meant for internal usage by the bundle2 protocol only.
        They directly manipulate the low level stream including bundle2 level
        instruction.

        Do not use it to implement higher-level logic or methods."""
        if self._seekable:
            try:
                return self._fp.tell()
            except IOError as e:
                if e.errno == errno.ESPIPE:
                    # Stream turned out to be a pipe; remember that and
                    # report "unknown offset" from now on.
                    self._seekable = False
                else:
                    raise
        return None
1579
1579
1580
1580
# These are only the static capabilities.
# Check the 'getrepocaps' function for the rest.
capabilities = {
    # bundle2 container format version
    b'HG20': (),
    b'bookmarks': (),
    # error part subtypes the peer may emit
    b'error': (b'abort', b'unsupportedcontent', b'pushraced', b'pushkey'),
    b'listkeys': (),
    b'pushkey': (),
    # digest algorithms available for payload validation
    b'digests': tuple(sorted(util.DIGESTS.keys())),
    # URL schemes accepted for remote-changegroup parts
    b'remote-changegroup': (b'http', b'https'),
    b'hgtagsfnodes': (),
    b'rev-branch-cache': (),
    b'phases': (b'heads',),
    # stream clone support; conditionally removed by getrepocaps on servers
    b'stream': (b'v2',),
}
1596
1596
1597
1597
def getrepocaps(repo, allowpushback=False, role=None):
    """Return the bundle2 capabilities for a given repo.

    Exists to allow extensions (like evolution) to mutate the capabilities.

    The returned value is used for servers advertising their capabilities as
    well as clients advertising their capabilities to servers as part of
    bundle2 requests. The ``role`` argument specifies which is which.
    """
    if role not in (b'client', b'server'):
        raise error.ProgrammingError(b'role argument must be client or server')

    # Start from the static table and add the dynamic entries.
    caps = capabilities.copy()
    caps[b'changegroup'] = tuple(
        sorted(changegroup.supportedincomingversions(repo))
    )
    if obsolete.isenabled(repo, obsolete.exchangeopt):
        caps[b'obsmarkers'] = tuple(b'V%i' % v for v in obsolete.formats)
    if allowpushback:
        caps[b'pushback'] = ()
    if repo.ui.config(b'server', b'concurrent-push-mode') == b'check-related':
        caps[b'checkheads'] = (b'related',)
    if b'phases' in repo.ui.configlist(b'devel', b'legacy.exchange'):
        caps.pop(b'phases')

    if role == b'server':
        # Don't advertise stream clone support in server mode if not
        # configured.
        uncompressed = repo.ui.configbool(
            b'server', b'uncompressed', untrusted=True
        )
        bundle2stream = repo.ui.configbool(b'server', b'bundle2.stream')
        if not (uncompressed and bundle2stream):
            caps.pop(b'stream')
    # On the client, always advertise support, because payload support
    # should always be advertised.

    return caps
1638
1638
1639
1639
def bundle2caps(remote):
    """Return the bundle2 capabilities of a peer as a dict.

    A peer that advertises an empty ``bundle2`` capability string still
    speaks bundle2; only a genuinely absent capability yields an empty
    dict.
    """
    raw = remote.capable(b'bundle2')
    if raw == b'' or raw:
        capsblob = urlreq.unquote(remote.capable(b'bundle2'))
        return decodecaps(capsblob)
    return {}
1647
1647
1648
1648
def obsmarkersversion(caps):
    """extract the list of supported obsmarkers versions from a bundle2caps dict

    Capability tokens look like ``b'V1'``; anything not starting with
    ``b'V'`` is ignored and the numeric suffix is returned as an int.
    """
    versions = []
    for token in caps.get(b'obsmarkers', ()):
        if token.startswith(b'V'):
            versions.append(int(token[1:]))
    return versions
1654
1654
1655
1655
def writenewbundle(
    ui,
    repo,
    source,
    filename,
    bundletype,
    outgoing,
    opts,
    vfs=None,
    compression=None,
    compopts=None,
):
    """Write a bundle of ``bundletype`` for ``outgoing`` and return its name.

    ``HG10*`` types go through the legacy ``writebundle`` path with a v1
    changegroup; anything else must be an ``HG20`` (bundle2) type.
    """
    if bundletype.startswith(b'HG10'):
        cg = changegroup.makechangegroup(repo, outgoing, b'01', source)
        return writebundle(
            ui,
            cg,
            filename,
            bundletype,
            vfs=vfs,
            compression=compression,
            compopts=compopts,
        )
    if not bundletype.startswith(b'HG20'):
        raise error.ProgrammingError(b'unknown bundle type: %s' % bundletype)

    # Advertise V1 obsmarker support on the bundler so buildobsmarkerspart
    # can find a common format when markers are requested.
    caps = {}
    if b'obsolescence' in opts:
        caps[b'obsmarkers'] = (b'V1',)
    bundler = bundle20(ui, caps)
    bundler.setcompression(compression, compopts)
    _addpartsfromopts(ui, repo, bundler, source, outgoing, opts)

    return changegroup.writechunks(ui, bundler.getchunks(), filename, vfs=vfs)
1691
1691
1692
1692
def _addpartsfromopts(ui, repo, bundler, source, outgoing, opts):
    """Populate ``bundler`` with parts selected by the ``opts`` dict.

    We should eventually reconcile this logic with the one behind
    'exchange.getbundle2partsgenerator'.  The type of input from 'getbundle'
    and 'writenewbundle' are a bit different right now, so they are kept
    separated for the sake of simplicity.
    """
    # A changegroup is emitted unless explicitly disabled — stream bundles,
    # for instance, carry raw store content instead of a changegroup.
    if opts.get(b'changegroup', True):
        cgversion = opts.get(b'cg.version')
        if cgversion is None:
            cgversion = changegroup.safeversion(repo)
        cg = changegroup.makechangegroup(repo, outgoing, cgversion, source)
        part = bundler.newpart(b'changegroup', data=cg.getchunks())
        part.addparam(b'version', cg.version)
        if b'clcount' in cg.extras:
            part.addparam(
                b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
            )
        # Only run the (potentially costly) revset when phases were asked for.
        if opts.get(b'phases') and repo.revs(
            b'%ln and secret()', outgoing.ancestorsof
        ):
            part.addparam(
                b'targetphase', b'%d' % phases.secret, mandatory=False
            )
        if b'exp-sidedata-flag' in repo.requirements:
            part.addparam(b'exp-sidedata', b'1')

    if opts.get(b'streamv2', False):
        addpartbundlestream2(bundler, repo, stream=True)

    if opts.get(b'tagsfnodescache', True):
        addparttagsfnodescache(repo, bundler, outgoing)

    if opts.get(b'revbranchcache', True):
        addpartrevbranchcache(repo, bundler, outgoing)

    if opts.get(b'obsolescence', False):
        obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
        buildobsmarkerspart(bundler, obsmarkers)

    if opts.get(b'phases', False):
        headsbyphase = phases.subsetphaseheads(repo, outgoing.missing)
        phasedata = phases.binaryencode(headsbyphase)
        bundler.newpart(b'phase-heads', data=phasedata)
1740
1740
1741
1741
def addparttagsfnodescache(repo, bundler, outgoing):
    """Add an optional part carrying .hgtags filenode cache entries.

    .hgtags fnodes are only relevant for head changesets. While we could
    transfer values for all known nodes, there will likely be little to
    no benefit.

    We don't bother using a generator to produce output data because
    a) we only have 40 bytes per head and even esoteric numbers of heads
    consume little memory (1M heads is 40MB) b) we don't want to send the
    part if we don't have entries and knowing if we have entries requires
    cache lookups.
    """
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    payload = []
    for head in outgoing.ancestorsof:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(head, computemissing=False)
        if fnode is not None:
            payload.append(head)
            payload.append(fnode)

    if payload:
        bundler.newpart(b'hgtagsfnodes', data=b''.join(payload))
1765
1765
1766
1766
def addpartrevbranchcache(repo, bundler, outgoing):
    """Add an optional rev-branch-cache part for the bundled changesets."""
    cache = repo.revbranchcache()
    cl = repo.unfiltered().changelog
    # branch -> (open nodes, closed nodes); the boolean 'close' flag picks
    # which of the two sets a node belongs to.
    branchesdata = collections.defaultdict(lambda: (set(), set()))
    for node in outgoing.missing:
        branch, close = cache.branchinfo(cl.rev(node))
        branchesdata[branch][close].add(node)

    def _chunks():
        for branch, (nodes, closed) in sorted(branchesdata.items()):
            utf8branch = encoding.fromlocal(branch)
            yield rbcstruct.pack(len(utf8branch), len(nodes), len(closed))
            yield utf8branch
            # open nodes first, then closed ones, each group sorted
            for n in sorted(nodes) + sorted(closed):
                yield n

    bundler.newpart(
        b'cache:rev-branch-cache', data=_chunks(), mandatory=False
    )
1788
1788
1789
1789
def _formatrequirementsspec(requirements):
    """Encode requirements as a URL-quoted, sorted, comma-joined string.

    The local-only ``shared`` requirement is never sent to peers and is
    filtered out first.
    """
    relevant = sorted(req for req in requirements if req != b"shared")
    return urlreq.quote(b','.join(relevant))
1793
1793
1794
1794
def _formatrequirementsparams(requirements):
    """Return a ``requirements=...`` URL query fragment for ``requirements``."""
    spec = _formatrequirementsspec(requirements)
    return urlreq.quote(b"requirements=") + spec
1799
1799
1800
1800
def addpartbundlestream2(bundler, repo, **kwargs):
    """Add a 'stream2' part exposing raw store content, when requested.

    Only acts when the ``stream`` keyword argument is true.  Aborts when the
    server configuration forbids stream generation, when narrow patterns are
    supplied without narrow-stream-clone support, or when obsolescence
    markers cannot be conveyed to the client.
    """
    if not kwargs.get('stream', False):
        return

    if not streamclone.allowservergeneration(repo):
        raise error.Abort(
            _(
                b'stream data requested but server does not allow '
                b'this feature'
            ),
            hint=_(
                b'well-behaved clients should not be '
                b'requesting stream data from servers not '
                b'advertising it; the client may be buggy'
            ),
        )

    # Stream clones don't compress well. And compression undermines a
    # goal of stream clones, which is to be fast. Communicate the desire
    # to avoid compression to consumers of the bundle.
    bundler.prefercompressed = False

    # narrow-clone include/exclude patterns, if any
    includepats = kwargs.get('includepats')
    excludepats = kwargs.get('excludepats')

    narrowstream = repo.ui.configbool(
        b'experimental', b'server.stream-narrow-clones'
    )
    if (includepats or excludepats) and not narrowstream:
        raise error.Abort(_(b'server does not support narrow stream clones'))

    includeobsmarkers = False
    if repo.obsstore:
        remoteversions = obsmarkersversion(bundler.capabilities)
        if not remoteversions:
            raise error.Abort(
                _(
                    b'server has obsolescence markers, but client '
                    b'cannot receive them via stream clone'
                )
            )
        elif repo.obsstore._version in remoteversions:
            includeobsmarkers = True

    filecount, bytecount, it = streamclone.generatev2(
        repo, includepats, excludepats, includeobsmarkers
    )
    part = bundler.newpart(b'stream2', data=it)
    part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
    part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
    part.addparam(
        b'requirements',
        _formatrequirementsspec(repo.requirements),
        mandatory=True,
    )
1855
1855
1856
1856
def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker
    format.
    """
    if not markers:
        return None

    # Pick the best obsmarker format shared with the bundler's capabilities.
    version = obsolete.commonversion(obsmarkersversion(bundler.capabilities))
    if version is None:
        raise ValueError(b'bundler does not support common obsmarker format')
    stream = obsolete.encodemarkers(markers, True, version=version)
    return bundler.newpart(b'obsmarkers', data=stream)
1872
1872
1873
1873
def writebundle(
    ui, cg, filename, bundletype, vfs=None, compression=None, compopts=None
):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """
    if bundletype == b"HG20":
        # bundle2: wrap the changegroup in a proper bundle2 container.
        bundle = bundle20(ui)
        bundle.setcompression(compression, compopts)
        part = bundle.newpart(b'changegroup', data=cg.getchunks())
        part.addparam(b'version', cg.version)
        if b'clcount' in cg.extras:
            part.addparam(
                b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
            )
        chunkiter = bundle.getchunks()
    else:
        # compression argument is only for the bundle2 case
        assert compression is None
        if cg.version != b'01':
            raise error.Abort(
                _(b'old bundle types only supports v1 changegroups')
            )
        header, comp = bundletypes[bundletype]
        if comp not in util.compengines.supportedbundletypes:
            raise error.Abort(_(b'unknown stream compression type: %s') % comp)
        compengine = util.compengines.forbundletype(comp)

        def _gen():
            # legacy bundle: magic header followed by the compressed stream
            yield header
            for chunk in compengine.compressstream(cg.getchunks(), compopts):
                yield chunk

        chunkiter = _gen()

    # parse the changegroup data, otherwise we will block
    # in case of sshrepo because we don't know the end of the stream
    return changegroup.writechunks(ui, chunkiter, filename, vfs=vfs)
1917
1917
1918
1918
def combinechangegroupresults(op):
    """logic to combine 0 or more addchangegroup results into one

    A zero result marks the combined value as 0, but any accumulated head
    delta from earlier results still takes precedence — this mirrors the
    historical behavior of the addchangegroup return-value encoding.
    """
    results = [r.get(b'return', 0) for r in op.records[b'changegroup']]
    delta = 0
    combined = 1
    for ret in results:
        # If any changegroup result is 0, stop and report 0 (subject to the
        # head-delta override below).
        if ret == 0:
            combined = 0
            break
        if ret < -1:
            delta += ret + 1
        elif ret > 1:
            delta += ret - 1
    if delta > 0:
        combined = 1 + delta
    elif delta < 0:
        combined = -1 + delta
    return combined
1938
1938
1939
1939
@parthandler(
    b'changegroup',
    (
        b'version',
        b'nbchanges',
        b'exp-sidedata',
        b'treemanifest',
        b'targetphase',
    ),
)
def handlechangegroup(op, inpart):
    """apply a changegroup part on the repo

    This is a very early implementation that will massive rework before being
    inflicted to any end-user.
    """
    from . import localrepo

    tr = op.gettransaction()
    unpackerversion = inpart.params.get(b'version', b'01')
    # We should raise an appropriate exception here
    cg = changegroup.getunbundler(unpackerversion, inpart, None)
    # the source and url passed here are overwritten by the one contained in
    # the transaction.hookargs argument. So 'bundle2' is a placeholder
    nbchangesets = None
    if b'nbchanges' in inpart.params:
        nbchangesets = int(inpart.params.get(b'nbchanges'))
    if b'treemanifest' in inpart.params and not scmutil.istreemanifest(op.repo):
        # Upgrading an empty repo to tree manifests is allowed; a non-empty
        # flat-manifest repo cannot accept a tree-manifest bundle.
        if len(op.repo.changelog) != 0:
            raise error.Abort(
                _(
                    b"bundle contains tree manifests, but local repo is "
                    b"non-empty and does not use tree manifests"
                )
            )
        op.repo.requirements.add(requirements.TREEMANIFEST_REQUIREMENT)
        op.repo.svfs.options = localrepo.resolvestorevfsoptions(
            op.repo.ui, op.repo.requirements, op.repo.features
        )
        scmutil.writereporequirements(op.repo)

    bundlesidedata = b'exp-sidedata' in inpart.params
    reposidedata = b'exp-sidedata-flag' in op.repo.requirements
    if reposidedata and not bundlesidedata:
        msg = b"repository is using sidedata but the bundle source do not"
        hint = b'this is currently unsupported'
        raise error.Abort(msg, hint=hint)

    extrakwargs = {}
    targetphase = inpart.params.get(b'targetphase')
    if targetphase is not None:
        extrakwargs['targetphase'] = int(targetphase)
    ret = _processchangegroup(
        op,
        cg,
        tr,
        b'bundle2',
        b'bundle2',
        expectedtotal=nbchangesets,
        **extrakwargs
    )
    if op.reply is not None:
        # This is definitely not the final form of this
        # return. But one need to start somewhere.
        part = op.reply.newpart(b'reply:changegroup', mandatory=False)
        part.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        part.addparam(b'return', b'%i' % ret, mandatory=False)
    assert not inpart.read()
2010
2010
2011
2011
# Parameters understood by the 'remote-changegroup' part handler: the fixed
# trio plus one 'digest:<name>' entry per digest algorithm we support.
_remotechangegroupparams = tuple(
    [b'url', b'size', b'digests']
    + [b'digest:%s' % digest for digest in util.DIGESTS]
)
2016
2016
2017
2017
@parthandler(b'remote-changegroup', _remotechangegroupparams)
def handleremotechangegroup(op, inpart):
    """apply a bundle10 on the repo, given an url and validation information

    All the information about the remote bundle to import are given as
    parameters. The parameters include:
      - url: the url to the bundle10.
      - size: the bundle10 file size. It is used to validate what was
        retrieved by the client matches the server knowledge about the bundle.
      - digests: a space separated list of the digest types provided as
        parameters.
      - digest:<digest-type>: the hexadecimal representation of the digest with
        that name. Like the size, it is used to validate what was retrieved by
        the client matches what the server knows about the bundle.

    When multiple digest types are given, all of them are checked.
    """
    try:
        raw_url = inpart.params[b'url']
    except KeyError:
        raise error.Abort(_(b'remote-changegroup: missing "%s" param') % b'url')
    parsed_url = util.url(raw_url)
    if parsed_url.scheme not in capabilities[b'remote-changegroup']:
        raise error.Abort(
            _(b'remote-changegroup does not support %s urls')
            % parsed_url.scheme
        )

    try:
        size = int(inpart.params[b'size'])
    except ValueError:
        raise error.Abort(
            _(b'remote-changegroup: invalid value for param "%s"') % b'size'
        )
    except KeyError:
        raise error.Abort(
            _(b'remote-changegroup: missing "%s" param') % b'size'
        )

    # collect the expected digest for each advertised digest type
    digests = {}
    for typ in inpart.params.get(b'digests', b'').split():
        param = b'digest:%s' % typ
        try:
            value = inpart.params[param]
        except KeyError:
            raise error.Abort(
                _(b'remote-changegroup: missing "%s" param') % param
            )
        digests[typ] = value

    # wrap the remote stream so size and digests are verified as we read
    real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests)

    tr = op.gettransaction()
    from . import exchange

    cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
    if not isinstance(cg, changegroup.cg1unpacker):
        raise error.Abort(
            _(b'%s: not a bundle version 1.0') % util.hidepassword(raw_url)
        )
    ret = _processchangegroup(op, cg, tr, b'bundle2', b'bundle2')
    if op.reply is not None:
        # This is definitely not the final form of this
        # return. But one need to start somewhere.
        part = op.reply.newpart(b'reply:changegroup')
        part.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        part.addparam(b'return', b'%i' % ret, mandatory=False)
    try:
        real_part.validate()
    except error.Abort as e:
        # Use the Abort's structured ``message`` attribute rather than
        # stringifying the whole exception: bytes(e) would also fold in
        # formatting beyond the message and relies on Abort's __bytes__.
        raise error.Abort(
            _(b'bundle at %s is corrupted:\n%s')
            % (util.hidepassword(raw_url), e.message)
        )
    assert not inpart.read()
2095
2095
2096
2096
@parthandler(b'reply:changegroup', (b'return', b'in-reply-to'))
def handlereplychangegroup(op, inpart):
    """Record the outcome of a previously transmitted changegroup part.

    The 'return' parameter carries the changegroup application result and
    'in-reply-to' identifies the part the reply refers to.
    """
    returncode = int(inpart.params[b'return'])
    original_id = int(inpart.params[b'in-reply-to'])
    op.records.add(b'changegroup', {b'return': returncode}, original_id)
2102
2102
2103
2103
@parthandler(b'check:bookmarks')
def handlecheckbookmarks(op, inpart):
    """check location of bookmarks

    This part is to be used to detect push race regarding bookmark, it
    contains binary encoded (bookmark, node) tuple. If the local state does
    not marks the one in the part, a PushRaced exception is raised
    """
    bookdata = bookmarks.binarydecode(inpart)

    # Three failure flavors: the bookmark moved, the bookmark is absent
    # locally while the sender expected a node, or it exists locally while
    # the sender expected it to be absent.
    msgstandard = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" move from %s to %s)'
    )
    msgmissing = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" is missing, expected %s)'
    )
    msgexist = (
        b'remote repository changed while pushing - please try again '
        b'(bookmark "%s" set on %s, expected missing)'
    )
    for book, node in bookdata:
        # `node is None` encodes "the sender expects this bookmark to be
        # absent locally".
        currentnode = op.repo._bookmarks.get(book)
        if currentnode != node:
            if node is None:
                finalmsg = msgexist % (book, nodemod.short(currentnode))
            elif currentnode is None:
                finalmsg = msgmissing % (book, nodemod.short(node))
            else:
                finalmsg = msgstandard % (
                    book,
                    nodemod.short(node),
                    nodemod.short(currentnode),
                )
            raise error.PushRaced(finalmsg)
2140
2140
2141
2141
@parthandler(b'check:heads')
def handlecheckheads(op, inpart):
    """check that head of the repo did not change

    This is used to detect a push race when using unbundle.
    This replaces the "heads" argument of unbundle."""
    expectedheads = []
    chunk = inpart.read(20)
    while len(chunk) == 20:
        expectedheads.append(chunk)
        chunk = inpart.read(20)
    # The payload must be an exact multiple of 20-byte nodes.
    assert not chunk
    # Trigger a transaction so that we are guaranteed to have the lock now.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    if sorted(op.repo.heads()) != sorted(expectedheads):
        raise error.PushRaced(
            b'remote repository changed while pushing - please try again'
        )
2161
2161
2162
2162
@parthandler(b'check:updated-heads')
def handlecheckupdatedheads(op, inpart):
    """check for race on the heads touched by a push

    This is similar to 'check:heads' but focus on the heads actually updated
    during the push. If other activities happen on unrelated heads, it is
    ignored.

    This allow server with high traffic to avoid push contention as long as
    unrelated parts of the graph are involved."""
    pushedheads = []
    chunk = inpart.read(20)
    while len(chunk) == 20:
        pushedheads.append(chunk)
        chunk = inpart.read(20)
    # The payload must be an exact multiple of 20-byte nodes.
    assert not chunk
    # trigger a transaction so that we are guaranteed to have the lock now.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()

    knownheads = set()
    for branchheads in op.repo.branchmap().iterheads():
        knownheads.update(branchheads)

    for candidate in pushedheads:
        if candidate not in knownheads:
            raise error.PushRaced(
                b'remote repository changed while pushing - '
                b'please try again'
            )
2193
2193
2194
2194
@parthandler(b'check:phases')
def handlecheckphases(op, inpart):
    """check that phase boundaries of the repository did not change

    This is used to detect a push race.
    """
    phasetonodes = phases.binarydecode(inpart)
    # Look up phases on the unfiltered repository so the check is not
    # affected by changeset filtering.
    unfi = op.repo.unfiltered()
    cl = unfi.changelog
    phasecache = unfi._phasecache
    msg = (
        b'remote repository changed while pushing - please try again '
        b'(%s is %s expected %s)'
    )
    for expectedphase, nodes in pycompat.iteritems(phasetonodes):
        for n in nodes:
            actualphase = phasecache.phase(unfi, cl.rev(n))
            if actualphase != expectedphase:
                finalmsg = msg % (
                    nodemod.short(n),
                    phases.phasenames[actualphase],
                    phases.phasenames[expectedphase],
                )
                raise error.PushRaced(finalmsg)
2219
2219
2220
2220
@parthandler(b'output')
def handleoutput(op, inpart):
    """forward output captured on the server to the client"""
    payload = inpart.read()
    for outputline in payload.splitlines():
        op.ui.status(_(b'remote: %s\n') % outputline)
2226
2226
2227
2227
@parthandler(b'replycaps')
def handlereplycaps(op, inpart):
    """Notify that a reply bundle should be created

    The payload contains the capabilities information for the reply"""
    # Always consume the part payload, even when a reply bundle already
    # exists, so the stream stays in sync.
    replycaps = decodecaps(inpart.read())
    if op.reply is None:
        op.reply = bundle20(op.ui, replycaps)
2236
2236
2237
2237
class AbortFromPart(error.Abort):
    """Sub-class of Abort that denotes an error from a bundle2 part.

    Raised by the 'error:abort' part handler so callers can distinguish
    aborts transmitted by the remote side from locally raised ones.
    """
2240
2240
2241
2241
@parthandler(b'error:abort', (b'message', b'hint'))
def handleerrorabort(op, inpart):
    """Used to transmit abort error over the wire"""
    message = inpart.params[b'message']
    # 'hint' is optional and may be absent from the part parameters.
    hint = inpart.params.get(b'hint')
    raise AbortFromPart(message, hint=hint)
2248
2248
2249
2249
@parthandler(
    b'error:pushkey',
    (b'namespace', b'key', b'new', b'old', b'ret', b'in-reply-to'),
)
def handleerrorpushkey(op, inpart):
    """Used to transmit failure of a mandatory pushkey over the wire"""
    # Forward only the parameters that were actually transmitted.
    details = {}
    for field in (b'namespace', b'key', b'new', b'old', b'ret'):
        fieldvalue = inpart.params.get(field)
        if fieldvalue is not None:
            details[field] = fieldvalue
    raise error.PushkeyFailed(
        inpart.params[b'in-reply-to'], **pycompat.strkwargs(details)
    )
2264
2264
2265
2265
@parthandler(b'error:unsupportedcontent', (b'parttype', b'params'))
def handleerrorunsupportedcontent(op, inpart):
    """Used to transmit unknown content error over the wire"""
    errargs = {}
    parttype = inpart.params.get(b'parttype')
    if parttype is not None:
        errargs[b'parttype'] = parttype
    rawparams = inpart.params.get(b'params')
    if rawparams is not None:
        # Individual parameters are NUL-separated on the wire.
        errargs[b'params'] = rawparams.split(b'\0')
    raise error.BundleUnknownFeatureError(**pycompat.strkwargs(errargs))
2278
2278
2279
2279
@parthandler(b'error:pushraced', (b'message',))
def handleerrorpushraced(op, inpart):
    """Used to transmit push race error over the wire"""
    racemsg = inpart.params[b'message']
    raise error.ResponseError(_(b'push failed:'), racemsg)
2284
2284
2285
2285
@parthandler(b'listkeys', (b'namespace',))
def handlelistkeys(op, inpart):
    """retrieve pushkey namespace content stored in a bundle2"""
    ns = inpart.params[b'namespace']
    decoded = pushkey.decodekeys(inpart.read())
    op.records.add(b'listkeys', (ns, decoded))
2292
2292
2293
2293
@parthandler(b'pushkey', (b'namespace', b'key', b'old', b'new'))
def handlepushkey(op, inpart):
    """process a pushkey request

    Applies the pushkey to the repository, records the result, reports it
    in the reply bundle (if any), and raises PushkeyFailed when a mandatory
    pushkey did not succeed.
    """
    dec = pushkey.decode
    namespace = dec(inpart.params[b'namespace'])
    key = dec(inpart.params[b'key'])
    old = dec(inpart.params[b'old'])
    new = dec(inpart.params[b'new'])
    # Grab the transaction to ensure that we have the lock before performing the
    # pushkey.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    ret = op.repo.pushkey(namespace, key, old, new)
    record = {b'namespace': namespace, b'key': key, b'old': old, b'new': new}
    op.records.add(b'pushkey', record)
    # When a reply bundle is being built, report the pushkey return code.
    if op.reply is not None:
        rpart = op.reply.newpart(b'reply:pushkey')
        rpart.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        rpart.addparam(b'return', b'%i' % ret, mandatory=False)
    # A failed mandatory pushkey aborts the whole unbundle operation.
    if inpart.mandatory and not ret:
        kwargs = {}
        for key in (b'namespace', b'key', b'new', b'old', b'ret'):
            if key in inpart.params:
                kwargs[key] = inpart.params[key]
        raise error.PushkeyFailed(
            partid=b'%d' % inpart.id, **pycompat.strkwargs(kwargs)
        )
2323
2323
2324
2324
@parthandler(b'bookmarks')
def handlebookmark(op, inpart):
    """transmit bookmark information

    The part contains binary encoded bookmark information.

    The exact behavior of this part can be controlled by the 'bookmarks' mode
    on the bundle operation.

    When mode is 'apply' (the default) the bookmark information is applied as
    is to the unbundling repository. Make sure a 'check:bookmarks' part is
    issued earlier to check for push races in such update. This behavior is
    suitable for pushing.

    When mode is 'records', the information is recorded into the 'bookmarks'
    records of the bundle operation. This behavior is suitable for pulling.
    """
    changes = bookmarks.binarydecode(inpart)

    pushkeycompat = op.repo.ui.configbool(
        b'server', b'bookmarks-pushkey-compat'
    )
    bookmarksmode = op.modes.get(b'bookmarks', b'apply')

    if bookmarksmode == b'apply':
        tr = op.gettransaction()
        bookstore = op.repo._bookmarks
        if pushkeycompat:
            # Emulate the legacy pushkey protocol: run 'prepushkey' hooks
            # (which may veto) before applying, and schedule 'pushkey'
            # hooks to run after the lock is released.
            allhooks = []
            for book, node in changes:
                hookargs = tr.hookargs.copy()
                hookargs[b'pushkeycompat'] = b'1'
                hookargs[b'namespace'] = b'bookmarks'
                hookargs[b'key'] = book
                hookargs[b'old'] = nodemod.hex(bookstore.get(book, b''))
                hookargs[b'new'] = nodemod.hex(
                    node if node is not None else b''
                )
                allhooks.append(hookargs)

            for hookargs in allhooks:
                op.repo.hook(
                    b'prepushkey', throw=True, **pycompat.strkwargs(hookargs)
                )

        for book, node in changes:
            if bookmarks.isdivergent(book):
                msg = _(b'cannot accept divergent bookmark %s!') % book
                raise error.Abort(msg)

        bookstore.applychanges(op.repo, op.gettransaction(), changes)

        if pushkeycompat:

            def runhook(unused_success):
                for hookargs in allhooks:
                    op.repo.hook(b'pushkey', **pycompat.strkwargs(hookargs))

            op.repo._afterlock(runhook)

    elif bookmarksmode == b'records':
        for book, node in changes:
            record = {b'bookmark': book, b'node': node}
            op.records.add(b'bookmarks', record)
    else:
        # Fixed typo in the error message: 'unkown' -> 'unknown'.
        raise error.ProgrammingError(
            b'unknown bookmark mode: %s' % bookmarksmode
        )
2393
2393
2394
2394
@parthandler(b'phase-heads')
def handlephases(op, inpart):
    """apply phases from bundle part to repo"""
    headsbyphase = phases.binarydecode(inpart)
    unfi = op.repo.unfiltered()
    phases.updatephases(unfi, op.gettransaction, headsbyphase)
2400
2400
2401
2401
@parthandler(b'reply:pushkey', (b'return', b'in-reply-to'))
def handlepushkeyreply(op, inpart):
    """retrieve the result of a pushkey request"""
    result = int(inpart.params[b'return'])
    replyto = int(inpart.params[b'in-reply-to'])
    op.records.add(b'pushkey', {b'return': result}, replyto)
2408
2408
2409
2409
@parthandler(b'obsmarkers')
def handleobsmarker(op, inpart):
    """add a stream of obsmarkers to the repo"""
    tr = op.gettransaction()
    markerdata = inpart.read()
    if op.ui.config(b'experimental', b'obsmarkers-exchange-debug'):
        op.ui.writenoi18n(
            b'obsmarker-exchange: %i bytes received\n' % len(markerdata)
        )
    # The mergemarkers call will crash if marker creation is not enabled.
    # we want to avoid this if the part is advisory.
    if not inpart.mandatory and op.repo.obsstore.readonly:
        op.repo.ui.debug(
            b'ignoring obsolescence markers, feature not enabled\n'
        )
        return
    new = op.repo.obsstore.mergemarkers(tr, markerdata)
    op.repo.invalidatevolatilesets()
    # Record how many markers were actually new, and report that count back
    # to the client when a reply bundle is being built.
    op.records.add(b'obsmarkers', {b'new': new})
    if op.reply is not None:
        rpart = op.reply.newpart(b'reply:obsmarkers')
        rpart.addparam(
            b'in-reply-to', pycompat.bytestr(inpart.id), mandatory=False
        )
        rpart.addparam(b'new', b'%i' % new, mandatory=False)
2435
2435
2436
2436
@parthandler(b'reply:obsmarkers', (b'new', b'in-reply-to'))
def handleobsmarkerreply(op, inpart):
    """retrieve the result of an obsmarkers part (count of new markers)

    Note: the previous docstring said "pushkey request", a copy-paste error
    from the reply:pushkey handler.
    """
    ret = int(inpart.params[b'new'])
    partid = int(inpart.params[b'in-reply-to'])
    op.records.add(b'obsmarkers', {b'new': ret}, partid)
2443
2443
2444
2444
@parthandler(b'hgtagsfnodes')
def handlehgtagsfnodes(op, inpart):
    """Applies .hgtags fnodes cache entries to the local repo.

    Payload is pairs of 20 byte changeset nodes and filenodes.
    """
    # Grab the transaction so we ensure that we have the lock at this point.
    if op.ui.configbool(b'experimental', b'bundle2lazylocking'):
        op.gettransaction()
    cache = tags.hgtagsfnodescache(op.repo.unfiltered())

    count = 0
    while True:
        node = inpart.read(20)
        fnode = inpart.read(20)
        # A short read marks the end of the payload; a truncated trailing
        # pair is deliberately ignored rather than treated as an error.
        if len(node) < 20 or len(fnode) < 20:
            op.ui.debug(b'ignoring incomplete received .hgtags fnodes data\n')
            break
        cache.setfnode(node, fnode)
        count += 1

    cache.write()
    op.ui.debug(b'applied %i hgtags fnodes cache entries\n' % count)
2468
2468
2469
2469
# Fixed-size header preceding each branch record in a 'cache:rev-branch-cache'
# part: three big-endian uint32 values (branch name length, number of open
# heads, number of closed heads) — see handlerbc below.
rbcstruct = struct.Struct(b'>III')
2471
2471
2472
2472
@parthandler(b'cache:rev-branch-cache')
def handlerbc(op, inpart):
    """receive a rev-branch-cache payload and update the local cache

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    rawheader = inpart.read(rbcstruct.size)
    cache = op.repo.revbranchcache()
    cl = op.repo.unfiltered().changelog
    while rawheader:
        # header = (branch name length, open head count, closed head count)
        header = rbcstruct.unpack(rawheader)
        utf8branch = inpart.read(header[0])
        branch = encoding.tolocal(utf8branch)
        # Open heads come first, followed by closed heads (closed flag True).
        for x in pycompat.xrange(header[1]):
            node = inpart.read(20)
            rev = cl.rev(node)
            cache.setdata(branch, rev, node, False)
        for x in pycompat.xrange(header[2]):
            node = inpart.read(20)
            rev = cl.rev(node)
            cache.setdata(branch, rev, node, True)
        rawheader = inpart.read(rbcstruct.size)
    cache.write()
    # Removed the dead `total` accumulator: it summed head counts but was
    # never read anywhere in this function.
2504
2504
2505
2505
@parthandler(b'pushvars')
def bundle2getvars(op, part):
    '''unbundle a bundle2 containing shellvars on the server'''
    # An option to disable unbundling on server-side for security reasons
    if not op.ui.configbool(b'push', b'pushvars.server'):
        return
    hookargs = {}
    for name, value in part.advisoryparams:
        # We want pushed variables to have USERVAR_ prepended so we know
        # they came from the --pushvar flag.
        hookargs[b"USERVAR_" + name.upper()] = value
    op.addhookargs(hookargs)
2519
2519
2520
2520
@parthandler(b'stream2', (b'requirements', b'filecount', b'bytecount'))
def handlestreamv2bundle(op, part):
    """apply a version-2 stream clone bundle to the repository

    Aborts when the target repository is not empty: a stream clone
    replaces the store wholesale rather than merging into it.
    """
    requirements = urlreq.unquote(part.params[b'requirements']).split(b',')
    filecount = int(part.params[b'filecount'])
    bytecount = int(part.params[b'bytecount'])

    repo = op.repo
    if len(repo):
        msg = _(b'cannot apply stream clone to non empty repository')
        raise error.Abort(msg)

    repo.ui.debug(b'applying stream bundle\n')
    streamclone.applybundlev2(repo, part, filecount, bytecount, requirements)
2535
2535
2536
2536
def widen_bundle(
    bundler, repo, oldmatcher, newmatcher, common, known, cgversion, ellipses
):
    """generates bundle2 for widening a narrow clone

    bundler is the bundle to which data should be added
    repo is the localrepository instance
    oldmatcher matches what the client already has
    newmatcher matches what the client needs (including what it already has)
    common is set of common heads between server and client
    known is a set of revs known on the client side (used in ellipses)
    cgversion is the changegroup version to send
    ellipses is boolean value telling whether to send ellipses data or not

    returns bundle2 of the data required for extending
    """
    # NOTE(review): 'known' and 'ellipses' are not used in this body —
    # confirm whether they are kept only for interface compatibility.
    commonnodes = set()
    cl = repo.changelog
    # Collect every node reachable from the common heads (ancestors
    # included), i.e. everything both sides already share.
    for r in repo.revs(b"::%ln", common):
        commonnodes.add(cl.node(r))
    if commonnodes:
        # XXX: we should only send the filelogs (and treemanifest). user
        # already has the changelog and manifest
        packer = changegroup.getbundler(
            cgversion,
            repo,
            oldmatcher=oldmatcher,
            matcher=newmatcher,
            fullnodes=commonnodes,
        )
        # Regenerate the shared changesets under the widened matcher; the
        # changelog itself is explicitly excluded (changelog=False).
        cgdata = packer.generate(
            {nodemod.nullid},
            list(commonnodes),
            False,
            b'narrow_widen',
            changelog=False,
        )

        part = bundler.newpart(b'changegroup', data=cgdata)
        part.addparam(b'version', cgversion)
        if scmutil.istreemanifest(repo):
            part.addparam(b'treemanifest', b'1')
        if b'exp-sidedata-flag' in repo.requirements:
            part.addparam(b'exp-sidedata', b'1')

    return bundler
@@ -1,741 +1,741
1 # chgserver.py - command server extension for cHg
1 # chgserver.py - command server extension for cHg
2 #
2 #
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """command server extension for cHg
8 """command server extension for cHg
9
9
10 'S' channel (read/write)
10 'S' channel (read/write)
11 propagate ui.system() request to client
11 propagate ui.system() request to client
12
12
13 'attachio' command
13 'attachio' command
14 attach client's stdio passed by sendmsg()
14 attach client's stdio passed by sendmsg()
15
15
16 'chdir' command
16 'chdir' command
17 change current directory
17 change current directory
18
18
19 'setenv' command
19 'setenv' command
20 replace os.environ completely
20 replace os.environ completely
21
21
22 'setumask' command (DEPRECATED)
22 'setumask' command (DEPRECATED)
23 'setumask2' command
23 'setumask2' command
24 set umask
24 set umask
25
25
26 'validate' command
26 'validate' command
27 reload the config and check if the server is up to date
27 reload the config and check if the server is up to date
28
28
29 Config
29 Config
30 ------
30 ------
31
31
32 ::
32 ::
33
33
34 [chgserver]
34 [chgserver]
35 # how long (in seconds) should an idle chg server exit
35 # how long (in seconds) should an idle chg server exit
36 idletimeout = 3600
36 idletimeout = 3600
37
37
38 # whether to skip config or env change checks
38 # whether to skip config or env change checks
39 skiphash = False
39 skiphash = False
40 """
40 """
41
41
42 from __future__ import absolute_import
42 from __future__ import absolute_import
43
43
44 import inspect
44 import inspect
45 import os
45 import os
46 import re
46 import re
47 import socket
47 import socket
48 import stat
48 import stat
49 import struct
49 import struct
50 import time
50 import time
51
51
52 from .i18n import _
52 from .i18n import _
53 from .pycompat import (
53 from .pycompat import (
54 getattr,
54 getattr,
55 setattr,
55 setattr,
56 )
56 )
57
57
58 from . import (
58 from . import (
59 commandserver,
59 commandserver,
60 encoding,
60 encoding,
61 error,
61 error,
62 extensions,
62 extensions,
63 node,
63 node,
64 pycompat,
64 pycompat,
65 util,
65 util,
66 )
66 )
67
67
68 from .utils import (
68 from .utils import (
69 hashutil,
69 hashutil,
70 procutil,
70 procutil,
71 stringutil,
71 stringutil,
72 )
72 )
73
73
74
74
def _hashlist(items):
    """Return the hex sha1 digest of the pprint serialization of *items*."""
    serialized = stringutil.pprint(items)
    return node.hex(hashutil.sha1(serialized).digest())
78
78
79
79
80 # sensitive config sections affecting confighash
80 # sensitive config sections affecting confighash
81 _configsections = [
81 _configsections = [
82 b'alias', # affects global state commands.table
82 b'alias', # affects global state commands.table
83 b'diff-tools', # affects whether gui or not in extdiff's uisetup
83 b'diff-tools', # affects whether gui or not in extdiff's uisetup
84 b'eol', # uses setconfig('eol', ...)
84 b'eol', # uses setconfig('eol', ...)
85 b'extdiff', # uisetup will register new commands
85 b'extdiff', # uisetup will register new commands
86 b'extensions',
86 b'extensions',
87 b'fastannotate', # affects annotate command and adds fastannonate cmd
87 b'fastannotate', # affects annotate command and adds fastannonate cmd
88 b'merge-tools', # affects whether gui or not in extdiff's uisetup
88 b'merge-tools', # affects whether gui or not in extdiff's uisetup
89 b'schemes', # extsetup will update global hg.schemes
89 b'schemes', # extsetup will update global hg.schemes
90 ]
90 ]
91
91
92 _configsectionitems = [
92 _configsectionitems = [
93 (b'commands', b'show.aliasprefix'), # show.py reads it in extsetup
93 (b'commands', b'show.aliasprefix'), # show.py reads it in extsetup
94 ]
94 ]
95
95
96 # sensitive environment variables affecting confighash
96 # sensitive environment variables affecting confighash
97 _envre = re.compile(
97 _envre = re.compile(
98 br'''\A(?:
98 br'''\A(?:
99 CHGHG
99 CHGHG
100 |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
100 |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
101 |HG(?:ENCODING|PLAIN).*
101 |HG(?:ENCODING|PLAIN).*
102 |LANG(?:UAGE)?
102 |LANG(?:UAGE)?
103 |LC_.*
103 |LC_.*
104 |LD_.*
104 |LD_.*
105 |PATH
105 |PATH
106 |PYTHON.*
106 |PYTHON.*
107 |TERM(?:INFO)?
107 |TERM(?:INFO)?
108 |TZ
108 |TZ
109 )\Z''',
109 )\Z''',
110 re.X,
110 re.X,
111 )
111 )
112
112
113
113
def _confighash(ui):
    """return a quick hash for detecting config/env changes

    confighash is the hash of sensitive config items and environment variables.

    for chgserver, it is designed that once confighash changes, the server is
    not qualified to serve its client and should redirect the client to a new
    server. different from mtimehash, confighash change will not mark the
    server outdated and exit since the user can have different configs at the
    same time.
    """
    # gather sensitive section contents, then single (section, item) values
    sectionitems = [ui.configitems(section) for section in _configsections]
    sectionitems.extend(
        ui.config(section, item) for section, item in _configsectionitems
    )
    sectionhash = _hashlist(sectionitems)
    # If $CHGHG is set, the change to $HG should not trigger a new chg server
    ignored = {b'HG'} if b'CHGHG' in encoding.environ else set()
    envitems = [
        (k, v)
        for k, v in pycompat.iteritems(encoding.environ)
        if _envre.match(k) and k not in ignored
    ]
    envhash = _hashlist(sorted(envitems))
    return sectionhash[:6] + envhash[:6]
143
143
144
144
def _getmtimepaths(ui):
    """get a list of paths that should be checked to detect change

    The list will include:
    - extensions (will not cover all files for complex extensions)
    - mercurial/__version__.py
    - python binary
    """
    mods = [module for _name, module in extensions.extensions(ui)]
    try:
        from . import __version__

        mods.append(__version__)
    except ImportError:
        pass
    paths = set()
    if pycompat.sysexecutable:
        paths.add(pycompat.sysexecutable)
    for module in mods:
        try:
            paths.add(pycompat.fsencode(inspect.getabsfile(module)))
        except TypeError:
            # e.g. built-in modules have no associated source file
            pass
    return sorted(paths)
169
169
170
170
def _mtimehash(paths):
    """return a quick hash for detecting file changes

    mtimehash calls stat on given paths and calculate a hash based on size and
    mtime of each file. mtimehash does not read file content because reading is
    expensive. therefore it's not 100% reliable for detecting content changes.
    it's possible to return different hashes for same file contents.
    it's also possible to return a same hash for different file contents for
    some carefully crafted situation.

    for chgserver, it is designed that once mtimehash changes, the server is
    considered outdated immediately and should no longer provide service.

    mtimehash is not included in confighash because we only know the paths of
    extensions after importing them (there is imp.find_module but that faces
    race conditions). We need to calculate confighash without importing.
    """

    def trystat(path):
        # stat failures (ENOENT, EPERM, ...) are not fatal; such entries
        # simply contribute None to the hashed list
        try:
            st = os.stat(path)
        except OSError:
            return None
        return (st[stat.ST_MTIME], st.st_size)

    return _hashlist([trystat(p) for p in paths])[:12]
198
198
199
199
class hashstate(object):
    """a structure storing confighash, mtimehash, paths used for mtimehash"""

    def __init__(self, confighash, mtimehash, mtimepaths):
        self.confighash = confighash  # hash of sensitive config + environment
        self.mtimehash = mtimehash  # hash of (mtime, size) of watched files
        self.mtimepaths = mtimepaths  # files mtimehash was computed from

    @staticmethod
    def fromui(ui, mtimepaths=None):
        """Build a hashstate from *ui*, computing mtimepaths when omitted."""
        if mtimepaths is None:
            mtimepaths = _getmtimepaths(ui)
        state = hashstate(_confighash(ui), _mtimehash(mtimepaths), mtimepaths)
        ui.log(
            b'cmdserver',
            b'confighash = %s mtimehash = %s\n',
            state.confighash,
            state.mtimehash,
        )
        return state
221
221
222
222
def _newchgui(srcui, csystem, attachio):
    """Return a copy of *srcui* whose system()/pager requests are forwarded
    through *csystem* (the chg 'S' channel) instead of spawned locally."""

    class chgui(srcui.__class__):
        def __init__(self, src=None):
            super(chgui, self).__init__(src)
            if src:
                self._csystem = getattr(src, '_csystem', csystem)
            else:
                self._csystem = csystem

        def _runsystem(self, cmd, environ, cwd, out):
            # fallback to the original system method if
            # a. the output stream is not stdout (e.g. stderr, cStringIO),
            # b. or stdout is redirected by protectfinout(),
            # because the chg client is not aware of these situations and
            # will behave differently (i.e. write to stdout).
            usefallback = (
                out is not self.fout
                or not util.safehasattr(self.fout, b'fileno')
                or self.fout.fileno() != procutil.stdout.fileno()
                or self._finoutredirected
            )
            if usefallback:
                return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
            self.flush()
            return self._csystem(cmd, procutil.shellenviron(environ), cwd)

        def _runpager(self, cmd, env=None):
            self._csystem(
                cmd,
                procutil.shellenviron(env),
                type=b'pager',
                cmdtable={b'attachio': attachio},
            )
            return True

    return chgui(srcui)
258
258
259
259
def _loadnewui(srcui, args, cdebug):
    """Create a fresh (ui, lui) pair configured from command-line *args*.

    The new ui inherits stdio handles, environment, the chg system channel
    and dynamic configuration from *srcui*.
    """
    from . import dispatch  # avoid cycle

    newui = srcui.__class__.load()
    for attr in (b'fin', b'fout', b'ferr', b'environ'):
        setattr(newui, attr, getattr(srcui, attr))
    if util.safehasattr(srcui, b'_csystem'):
        newui._csystem = srcui._csystem

    # command line args
    options = dispatch._earlyparseopts(newui, args)
    dispatch._parseconfig(newui, options[b'config'])

    # stolen from tortoisehg.util.copydynamicconfig()
    for section, name, value in srcui.walkconfig():
        source = srcui.configsource(section, name)
        if b':' in source or source == b'--config' or source.startswith(b'$'):
            # path:line or command line, or environ
            continue
        newui.setconfig(section, name, value, source)

    # load wd and repo config, copied from dispatch.py
    cwd = options[b'cwd']
    cwd = os.path.realpath(cwd) if cwd else None
    rpath = options[b'repository']
    path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)

    extensions.populateui(newui)
    commandserver.setuplogging(newui, fp=cdebug)
    if newui is not newlui:
        extensions.populateui(newlui)
        commandserver.setuplogging(newlui, fp=cdebug)

    return (newui, newlui)
294
294
295
295
class channeledsystem(object):
    """Propagate ui.system() request in the following format:

    payload length (unsigned int),
    type, '\0',
    cmd, '\0',
    cwd, '\0',
    envkey, '=', val, '\0',
    ...
    envkey, '=', val

    if type == 'system', waits for:

    exitcode length (unsigned int),
    exitcode (int)

    if type == 'pager', repetitively waits for a command name ending with '\n'
    and executes it defined by cmdtable, or exits the loop if the command name
    is empty.
    """

    def __init__(self, in_, out, channel):
        self.in_ = in_  # stream the client's replies arrive on
        self.out = out  # stream requests are written to
        self.channel = channel  # one-byte channel identifier (b'S')

    def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
        # serialize the request: type, cmd, cwd, then 'key=value' env pairs
        fields = [type, cmd, os.path.abspath(cwd or b'.')]
        fields.extend(
            b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ)
        )
        payload = b'\0'.join(fields)
        self.out.write(struct.pack(b'>cI', self.channel, len(payload)))
        self.out.write(payload)
        self.out.flush()

        if type == b'system':
            (length,) = struct.unpack(b'>I', self.in_.read(4))
            if length != 4:
                raise error.Abort(_(b'invalid response'))
            (rc,) = struct.unpack(b'>i', self.in_.read(4))
            return rc
        if type == b'pager':
            # serve cmdtable commands until the client sends an empty name
            while True:
                cmd = self.in_.readline()[:-1]
                if not cmd:
                    break
                if cmdtable and cmd in cmdtable:
                    cmdtable[cmd]()
                else:
                    raise error.Abort(_(b'unexpected command: %s') % cmd)
            return None
        raise error.ProgrammingError(b'invalid S channel type: %s' % type)
348
348
349
349
350 _iochannels = [
350 _iochannels = [
351 # server.ch, ui.fp, mode
351 # server.ch, ui.fp, mode
352 (b'cin', b'fin', 'rb'),
352 (b'cin', b'fin', 'rb'),
353 (b'cout', b'fout', 'wb'),
353 (b'cout', b'fout', 'wb'),
354 (b'cerr', b'ferr', 'wb'),
354 (b'cerr', b'ferr', 'wb'),
355 ]
355 ]
356
356
357
357
358 class chgcmdserver(commandserver.server):
358 class chgcmdserver(commandserver.server):
359 def __init__(
359 def __init__(
360 self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress
360 self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress
361 ):
361 ):
362 super(chgcmdserver, self).__init__(
362 super(chgcmdserver, self).__init__(
363 _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio),
363 _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio),
364 repo,
364 repo,
365 fin,
365 fin,
366 fout,
366 fout,
367 prereposetups,
367 prereposetups,
368 )
368 )
369 self.clientsock = sock
369 self.clientsock = sock
370 self._ioattached = False
370 self._ioattached = False
371 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
371 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
372 self.hashstate = hashstate
372 self.hashstate = hashstate
373 self.baseaddress = baseaddress
373 self.baseaddress = baseaddress
374 if hashstate is not None:
374 if hashstate is not None:
375 self.capabilities = self.capabilities.copy()
375 self.capabilities = self.capabilities.copy()
376 self.capabilities[b'validate'] = chgcmdserver.validate
376 self.capabilities[b'validate'] = chgcmdserver.validate
377
377
378 def cleanup(self):
378 def cleanup(self):
379 super(chgcmdserver, self).cleanup()
379 super(chgcmdserver, self).cleanup()
380 # dispatch._runcatch() does not flush outputs if exception is not
380 # dispatch._runcatch() does not flush outputs if exception is not
381 # handled by dispatch._dispatch()
381 # handled by dispatch._dispatch()
382 self.ui.flush()
382 self.ui.flush()
383 self._restoreio()
383 self._restoreio()
384 self._ioattached = False
384 self._ioattached = False
385
385
    def attachio(self):
        """Attach to client's stdio passed via unix domain socket; all
        channels except cresult will no longer be used

        Replies on cresult with the number of fds received.
        """
        # tell client to sendmsg() with 1-byte payload, which makes it
        # distinctive from "attachio\n" command consumed by client.read()
        self.clientsock.sendall(struct.pack(b'>cI', b'I', 1))
        clientfds = util.recvfds(self.clientsock.fileno())
        self.ui.log(b'chgserver', b'received fds: %r\n', clientfds)

        ui = self.ui
        ui.flush()
        # keep duplicates of the current fds so _restoreio() can undo this
        self._saveio()
        # fds are assumed to arrive in _iochannels order (stdin, stdout,
        # stderr); each one replaces the server's fd in place via dup2
        for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
            assert fd > 0
            fp = getattr(ui, fn)
            os.dup2(fd, fp.fileno())
            os.close(fd)
            if self._ioattached:
                continue
            # reset buffering mode when client is first attached. as we want
            # to see output immediately on pager, the mode stays unchanged
            # when client re-attached. ferr is unchanged because it should
            # be unbuffered no matter if it is a tty or not.
            if fn == b'ferr':
                newfp = fp
            else:
                # make it line buffered explicitly because the default is
                # decided on first write(), where fout could be a pager.
                if fp.isatty():
                    bufsize = 1  # line buffered
                else:
                    bufsize = -1  # system default
                newfp = os.fdopen(fp.fileno(), mode, bufsize)
            # install the (possibly re-wrapped) stream on both the ui and the
            # corresponding server channel attribute
            setattr(ui, fn, newfp)
            setattr(self, cn, newfp)

        self._ioattached = True
        self.cresult.write(struct.pack(b'>i', len(clientfds)))
425
425
426 def _saveio(self):
426 def _saveio(self):
427 if self._oldios:
427 if self._oldios:
428 return
428 return
429 ui = self.ui
429 ui = self.ui
430 for cn, fn, _mode in _iochannels:
430 for cn, fn, _mode in _iochannels:
431 ch = getattr(self, cn)
431 ch = getattr(self, cn)
432 fp = getattr(ui, fn)
432 fp = getattr(ui, fn)
433 fd = os.dup(fp.fileno())
433 fd = os.dup(fp.fileno())
434 self._oldios.append((ch, fp, fd))
434 self._oldios.append((ch, fp, fd))
435
435
    def _restoreio(self):
        """Undo attachio(): put back the fds saved by _saveio().

        Best-effort: dup2 failures (e.g. EBUSY under load, see issue6330)
        are logged and skipped rather than raised.
        """
        if not self._oldios:
            return
        # scratch fd used to drop unflushable buffered data (see below)
        nullfd = os.open(os.devnull, os.O_WRONLY)
        ui = self.ui
        for (ch, fp, fd), (cn, fn, mode) in zip(self._oldios, _iochannels):
            newfp = getattr(ui, fn)
            # close newfp while it's associated with client; otherwise it
            # would be closed when newfp is deleted
            if newfp is not fp:
                newfp.close()
            # restore original fd: fp is open again
            try:
                if newfp is fp and 'w' in mode:
                    # Discard buffered data which couldn't be flushed because
                    # of EPIPE. The data should belong to the current session
                    # and should never persist.
                    os.dup2(nullfd, fp.fileno())
                    fp.flush()
                os.dup2(fd, fp.fileno())
            except OSError as err:
                # According to issue6330, running chg on heavy loaded systems
                # can lead to EBUSY. [man dup2] indicates that, on Linux,
                # EBUSY comes from a race condition between open() and dup2().
                # However it's not clear why open() race occurred for
                # newfd=stdin/out/err.
                self.ui.log(
                    b'chgserver',
                    b'got %s while duplicating %s\n',
                    stringutil.forcebytestr(err),
                    fn,
                )
            os.close(fd)
            # re-install the pre-attachio channel/stream objects
            setattr(self, cn, ch)
            setattr(ui, fn, fp)
        os.close(nullfd)
        del self._oldios[:]
473
473
474 def validate(self):
474 def validate(self):
475 """Reload the config and check if the server is up to date
475 """Reload the config and check if the server is up to date
476
476
477 Read a list of '\0' separated arguments.
477 Read a list of '\0' separated arguments.
478 Write a non-empty list of '\0' separated instruction strings or '\0'
478 Write a non-empty list of '\0' separated instruction strings or '\0'
479 if the list is empty.
479 if the list is empty.
480 An instruction string could be either:
480 An instruction string could be either:
481 - "unlink $path", the client should unlink the path to stop the
481 - "unlink $path", the client should unlink the path to stop the
482 outdated server.
482 outdated server.
483 - "redirect $path", the client should attempt to connect to $path
483 - "redirect $path", the client should attempt to connect to $path
484 first. If it does not work, start a new server. It implies
484 first. If it does not work, start a new server. It implies
485 "reconnect".
485 "reconnect".
486 - "exit $n", the client should exit directly with code n.
486 - "exit $n", the client should exit directly with code n.
487 This may happen if we cannot parse the config.
487 This may happen if we cannot parse the config.
488 - "reconnect", the client should close the connection and
488 - "reconnect", the client should close the connection and
489 reconnect.
489 reconnect.
490 If neither "reconnect" nor "redirect" is included in the instruction
490 If neither "reconnect" nor "redirect" is included in the instruction
491 list, the client can continue with this server after completing all
491 list, the client can continue with this server after completing all
492 the instructions.
492 the instructions.
493 """
493 """
494 from . import dispatch # avoid cycle
494 from . import dispatch # avoid cycle
495
495
496 args = self._readlist()
496 args = self._readlist()
497 try:
497 try:
498 self.ui, lui = _loadnewui(self.ui, args, self.cdebug)
498 self.ui, lui = _loadnewui(self.ui, args, self.cdebug)
499 except error.ParseError as inst:
499 except error.ParseError as inst:
500 dispatch._formatparse(self.ui.warn, inst)
500 dispatch._formatparse(self.ui.warn, inst)
501 self.ui.flush()
501 self.ui.flush()
502 self.cresult.write(b'exit 255')
502 self.cresult.write(b'exit 255')
503 return
503 return
504 except error.Abort as inst:
504 except error.Abort as inst:
505 self.ui.error(_(b"abort: %s\n") % inst)
505 self.ui.error(_(b"abort: %s\n") % inst.message)
506 if inst.hint:
506 if inst.hint:
507 self.ui.error(_(b"(%s)\n") % inst.hint)
507 self.ui.error(_(b"(%s)\n") % inst.hint)
508 self.ui.flush()
508 self.ui.flush()
509 self.cresult.write(b'exit 255')
509 self.cresult.write(b'exit 255')
510 return
510 return
511 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
511 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
512 insts = []
512 insts = []
513 if newhash.mtimehash != self.hashstate.mtimehash:
513 if newhash.mtimehash != self.hashstate.mtimehash:
514 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
514 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
515 insts.append(b'unlink %s' % addr)
515 insts.append(b'unlink %s' % addr)
516 # mtimehash is empty if one or more extensions fail to load.
516 # mtimehash is empty if one or more extensions fail to load.
517 # to be compatible with hg, still serve the client this time.
517 # to be compatible with hg, still serve the client this time.
518 if self.hashstate.mtimehash:
518 if self.hashstate.mtimehash:
519 insts.append(b'reconnect')
519 insts.append(b'reconnect')
520 if newhash.confighash != self.hashstate.confighash:
520 if newhash.confighash != self.hashstate.confighash:
521 addr = _hashaddress(self.baseaddress, newhash.confighash)
521 addr = _hashaddress(self.baseaddress, newhash.confighash)
522 insts.append(b'redirect %s' % addr)
522 insts.append(b'redirect %s' % addr)
523 self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts))
523 self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts))
524 self.cresult.write(b'\0'.join(insts) or b'\0')
524 self.cresult.write(b'\0'.join(insts) or b'\0')
525
525
526 def chdir(self):
526 def chdir(self):
527 """Change current directory
527 """Change current directory
528
528
529 Note that the behavior of --cwd option is bit different from this.
529 Note that the behavior of --cwd option is bit different from this.
530 It does not affect --config parameter.
530 It does not affect --config parameter.
531 """
531 """
532 path = self._readstr()
532 path = self._readstr()
533 if not path:
533 if not path:
534 return
534 return
535 self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
535 self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
536 os.chdir(path)
536 os.chdir(path)
537
537
538 def setumask(self):
538 def setumask(self):
539 """Change umask (DEPRECATED)"""
539 """Change umask (DEPRECATED)"""
540 # BUG: this does not follow the message frame structure, but kept for
540 # BUG: this does not follow the message frame structure, but kept for
541 # backward compatibility with old chg clients for some time
541 # backward compatibility with old chg clients for some time
542 self._setumask(self._read(4))
542 self._setumask(self._read(4))
543
543
544 def setumask2(self):
544 def setumask2(self):
545 """Change umask"""
545 """Change umask"""
546 data = self._readstr()
546 data = self._readstr()
547 if len(data) != 4:
547 if len(data) != 4:
548 raise ValueError(b'invalid mask length in setumask2 request')
548 raise ValueError(b'invalid mask length in setumask2 request')
549 self._setumask(data)
549 self._setumask(data)
550
550
551 def _setumask(self, data):
551 def _setumask(self, data):
552 mask = struct.unpack(b'>I', data)[0]
552 mask = struct.unpack(b'>I', data)[0]
553 self.ui.log(b'chgserver', b'setumask %r\n', mask)
553 self.ui.log(b'chgserver', b'setumask %r\n', mask)
554 util.setumask(mask)
554 util.setumask(mask)
555
555
556 def runcommand(self):
556 def runcommand(self):
557 # pager may be attached within the runcommand session, which should
557 # pager may be attached within the runcommand session, which should
558 # be detached at the end of the session. otherwise the pager wouldn't
558 # be detached at the end of the session. otherwise the pager wouldn't
559 # receive EOF.
559 # receive EOF.
560 globaloldios = self._oldios
560 globaloldios = self._oldios
561 self._oldios = []
561 self._oldios = []
562 try:
562 try:
563 return super(chgcmdserver, self).runcommand()
563 return super(chgcmdserver, self).runcommand()
564 finally:
564 finally:
565 self._restoreio()
565 self._restoreio()
566 self._oldios = globaloldios
566 self._oldios = globaloldios
567
567
568 def setenv(self):
568 def setenv(self):
569 """Clear and update os.environ
569 """Clear and update os.environ
570
570
571 Note that not all variables can make an effect on the running process.
571 Note that not all variables can make an effect on the running process.
572 """
572 """
573 l = self._readlist()
573 l = self._readlist()
574 try:
574 try:
575 newenv = dict(s.split(b'=', 1) for s in l)
575 newenv = dict(s.split(b'=', 1) for s in l)
576 except ValueError:
576 except ValueError:
577 raise ValueError(b'unexpected value in setenv request')
577 raise ValueError(b'unexpected value in setenv request')
578 self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
578 self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
579
579
580 encoding.environ.clear()
580 encoding.environ.clear()
581 encoding.environ.update(newenv)
581 encoding.environ.update(newenv)
582
582
583 capabilities = commandserver.server.capabilities.copy()
583 capabilities = commandserver.server.capabilities.copy()
584 capabilities.update(
584 capabilities.update(
585 {
585 {
586 b'attachio': attachio,
586 b'attachio': attachio,
587 b'chdir': chdir,
587 b'chdir': chdir,
588 b'runcommand': runcommand,
588 b'runcommand': runcommand,
589 b'setenv': setenv,
589 b'setenv': setenv,
590 b'setumask': setumask,
590 b'setumask': setumask,
591 b'setumask2': setumask2,
591 b'setumask2': setumask2,
592 }
592 }
593 )
593 )
594
594
595 if util.safehasattr(procutil, b'setprocname'):
595 if util.safehasattr(procutil, b'setprocname'):
596
596
597 def setprocname(self):
597 def setprocname(self):
598 """Change process title"""
598 """Change process title"""
599 name = self._readstr()
599 name = self._readstr()
600 self.ui.log(b'chgserver', b'setprocname: %r\n', name)
600 self.ui.log(b'chgserver', b'setprocname: %r\n', name)
601 procutil.setprocname(name)
601 procutil.setprocname(name)
602
602
603 capabilities[b'setprocname'] = setprocname
603 capabilities[b'setprocname'] = setprocname
604
604
605
605
606 def _tempaddress(address):
606 def _tempaddress(address):
607 return b'%s.%d.tmp' % (address, os.getpid())
607 return b'%s.%d.tmp' % (address, os.getpid())
608
608
609
609
610 def _hashaddress(address, hashstr):
610 def _hashaddress(address, hashstr):
611 # if the basename of address contains '.', use only the left part. this
611 # if the basename of address contains '.', use only the left part. this
612 # makes it possible for the client to pass 'server.tmp$PID' and follow by
612 # makes it possible for the client to pass 'server.tmp$PID' and follow by
613 # an atomic rename to avoid locking when spawning new servers.
613 # an atomic rename to avoid locking when spawning new servers.
614 dirname, basename = os.path.split(address)
614 dirname, basename = os.path.split(address)
615 basename = basename.split(b'.', 1)[0]
615 basename = basename.split(b'.', 1)[0]
616 return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
616 return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
617
617
618
618
619 class chgunixservicehandler(object):
619 class chgunixservicehandler(object):
620 """Set of operations for chg services"""
620 """Set of operations for chg services"""
621
621
622 pollinterval = 1 # [sec]
622 pollinterval = 1 # [sec]
623
623
624 def __init__(self, ui):
624 def __init__(self, ui):
625 self.ui = ui
625 self.ui = ui
626 self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
626 self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
627 self._lastactive = time.time()
627 self._lastactive = time.time()
628
628
629 def bindsocket(self, sock, address):
629 def bindsocket(self, sock, address):
630 self._inithashstate(address)
630 self._inithashstate(address)
631 self._checkextensions()
631 self._checkextensions()
632 self._bind(sock)
632 self._bind(sock)
633 self._createsymlink()
633 self._createsymlink()
634 # no "listening at" message should be printed to simulate hg behavior
634 # no "listening at" message should be printed to simulate hg behavior
635
635
636 def _inithashstate(self, address):
636 def _inithashstate(self, address):
637 self._baseaddress = address
637 self._baseaddress = address
638 if self.ui.configbool(b'chgserver', b'skiphash'):
638 if self.ui.configbool(b'chgserver', b'skiphash'):
639 self._hashstate = None
639 self._hashstate = None
640 self._realaddress = address
640 self._realaddress = address
641 return
641 return
642 self._hashstate = hashstate.fromui(self.ui)
642 self._hashstate = hashstate.fromui(self.ui)
643 self._realaddress = _hashaddress(address, self._hashstate.confighash)
643 self._realaddress = _hashaddress(address, self._hashstate.confighash)
644
644
645 def _checkextensions(self):
645 def _checkextensions(self):
646 if not self._hashstate:
646 if not self._hashstate:
647 return
647 return
648 if extensions.notloaded():
648 if extensions.notloaded():
649 # one or more extensions failed to load. mtimehash becomes
649 # one or more extensions failed to load. mtimehash becomes
650 # meaningless because we do not know the paths of those extensions.
650 # meaningless because we do not know the paths of those extensions.
651 # set mtimehash to an illegal hash value to invalidate the server.
651 # set mtimehash to an illegal hash value to invalidate the server.
652 self._hashstate.mtimehash = b''
652 self._hashstate.mtimehash = b''
653
653
654 def _bind(self, sock):
654 def _bind(self, sock):
655 # use a unique temp address so we can stat the file and do ownership
655 # use a unique temp address so we can stat the file and do ownership
656 # check later
656 # check later
657 tempaddress = _tempaddress(self._realaddress)
657 tempaddress = _tempaddress(self._realaddress)
658 util.bindunixsocket(sock, tempaddress)
658 util.bindunixsocket(sock, tempaddress)
659 self._socketstat = os.stat(tempaddress)
659 self._socketstat = os.stat(tempaddress)
660 sock.listen(socket.SOMAXCONN)
660 sock.listen(socket.SOMAXCONN)
661 # rename will replace the old socket file if exists atomically. the
661 # rename will replace the old socket file if exists atomically. the
662 # old server will detect ownership change and exit.
662 # old server will detect ownership change and exit.
663 util.rename(tempaddress, self._realaddress)
663 util.rename(tempaddress, self._realaddress)
664
664
665 def _createsymlink(self):
665 def _createsymlink(self):
666 if self._baseaddress == self._realaddress:
666 if self._baseaddress == self._realaddress:
667 return
667 return
668 tempaddress = _tempaddress(self._baseaddress)
668 tempaddress = _tempaddress(self._baseaddress)
669 os.symlink(os.path.basename(self._realaddress), tempaddress)
669 os.symlink(os.path.basename(self._realaddress), tempaddress)
670 util.rename(tempaddress, self._baseaddress)
670 util.rename(tempaddress, self._baseaddress)
671
671
672 def _issocketowner(self):
672 def _issocketowner(self):
673 try:
673 try:
674 st = os.stat(self._realaddress)
674 st = os.stat(self._realaddress)
675 return (
675 return (
676 st.st_ino == self._socketstat.st_ino
676 st.st_ino == self._socketstat.st_ino
677 and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME]
677 and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME]
678 )
678 )
679 except OSError:
679 except OSError:
680 return False
680 return False
681
681
682 def unlinksocket(self, address):
682 def unlinksocket(self, address):
683 if not self._issocketowner():
683 if not self._issocketowner():
684 return
684 return
685 # it is possible to have a race condition here that we may
685 # it is possible to have a race condition here that we may
686 # remove another server's socket file. but that's okay
686 # remove another server's socket file. but that's okay
687 # since that server will detect and exit automatically and
687 # since that server will detect and exit automatically and
688 # the client will start a new server on demand.
688 # the client will start a new server on demand.
689 util.tryunlink(self._realaddress)
689 util.tryunlink(self._realaddress)
690
690
691 def shouldexit(self):
691 def shouldexit(self):
692 if not self._issocketowner():
692 if not self._issocketowner():
693 self.ui.log(
693 self.ui.log(
694 b'chgserver', b'%s is not owned, exiting.\n', self._realaddress
694 b'chgserver', b'%s is not owned, exiting.\n', self._realaddress
695 )
695 )
696 return True
696 return True
697 if time.time() - self._lastactive > self._idletimeout:
697 if time.time() - self._lastactive > self._idletimeout:
698 self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
698 self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
699 return True
699 return True
700 return False
700 return False
701
701
702 def newconnection(self):
702 def newconnection(self):
703 self._lastactive = time.time()
703 self._lastactive = time.time()
704
704
705 def createcmdserver(self, repo, conn, fin, fout, prereposetups):
705 def createcmdserver(self, repo, conn, fin, fout, prereposetups):
706 return chgcmdserver(
706 return chgcmdserver(
707 self.ui,
707 self.ui,
708 repo,
708 repo,
709 fin,
709 fin,
710 fout,
710 fout,
711 conn,
711 conn,
712 prereposetups,
712 prereposetups,
713 self._hashstate,
713 self._hashstate,
714 self._baseaddress,
714 self._baseaddress,
715 )
715 )
716
716
717
717
718 def chgunixservice(ui, repo, opts):
718 def chgunixservice(ui, repo, opts):
719 # CHGINTERNALMARK is set by chg client. It is an indication of things are
719 # CHGINTERNALMARK is set by chg client. It is an indication of things are
720 # started by chg so other code can do things accordingly, like disabling
720 # started by chg so other code can do things accordingly, like disabling
721 # demandimport or detecting chg client started by chg client. When executed
721 # demandimport or detecting chg client started by chg client. When executed
722 # here, CHGINTERNALMARK is no longer useful and hence dropped to make
722 # here, CHGINTERNALMARK is no longer useful and hence dropped to make
723 # environ cleaner.
723 # environ cleaner.
724 if b'CHGINTERNALMARK' in encoding.environ:
724 if b'CHGINTERNALMARK' in encoding.environ:
725 del encoding.environ[b'CHGINTERNALMARK']
725 del encoding.environ[b'CHGINTERNALMARK']
726 # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if
726 # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if
727 # it thinks the current value is "C". This breaks the hash computation and
727 # it thinks the current value is "C". This breaks the hash computation and
728 # causes chg to restart loop.
728 # causes chg to restart loop.
729 if b'CHGORIG_LC_CTYPE' in encoding.environ:
729 if b'CHGORIG_LC_CTYPE' in encoding.environ:
730 encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE']
730 encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE']
731 del encoding.environ[b'CHGORIG_LC_CTYPE']
731 del encoding.environ[b'CHGORIG_LC_CTYPE']
732 elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ:
732 elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ:
733 if b'LC_CTYPE' in encoding.environ:
733 if b'LC_CTYPE' in encoding.environ:
734 del encoding.environ[b'LC_CTYPE']
734 del encoding.environ[b'LC_CTYPE']
735 del encoding.environ[b'CHG_CLEAR_LC_CTYPE']
735 del encoding.environ[b'CHG_CLEAR_LC_CTYPE']
736
736
737 if repo:
737 if repo:
738 # one chgserver can serve multiple repos. drop repo information
738 # one chgserver can serve multiple repos. drop repo information
739 ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo')
739 ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo')
740 h = chgunixservicehandler(ui)
740 h = chgunixservicehandler(ui)
741 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
741 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
@@ -1,771 +1,771
1 # commandserver.py - communicate with Mercurial's API over a pipe
1 # commandserver.py - communicate with Mercurial's API over a pipe
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import gc
11 import gc
12 import os
12 import os
13 import random
13 import random
14 import signal
14 import signal
15 import socket
15 import socket
16 import struct
16 import struct
17 import traceback
17 import traceback
18
18
19 try:
19 try:
20 import selectors
20 import selectors
21
21
22 selectors.BaseSelector
22 selectors.BaseSelector
23 except ImportError:
23 except ImportError:
24 from .thirdparty import selectors2 as selectors
24 from .thirdparty import selectors2 as selectors
25
25
26 from .i18n import _
26 from .i18n import _
27 from .pycompat import getattr
27 from .pycompat import getattr
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 loggingutil,
31 loggingutil,
32 pycompat,
32 pycompat,
33 repocache,
33 repocache,
34 util,
34 util,
35 vfs as vfsmod,
35 vfs as vfsmod,
36 )
36 )
37 from .utils import (
37 from .utils import (
38 cborutil,
38 cborutil,
39 procutil,
39 procutil,
40 )
40 )
41
41
42
42
43 class channeledoutput(object):
43 class channeledoutput(object):
44 """
44 """
45 Write data to out in the following format:
45 Write data to out in the following format:
46
46
47 data length (unsigned int),
47 data length (unsigned int),
48 data
48 data
49 """
49 """
50
50
51 def __init__(self, out, channel):
51 def __init__(self, out, channel):
52 self.out = out
52 self.out = out
53 self.channel = channel
53 self.channel = channel
54
54
55 @property
55 @property
56 def name(self):
56 def name(self):
57 return b'<%c-channel>' % self.channel
57 return b'<%c-channel>' % self.channel
58
58
59 def write(self, data):
59 def write(self, data):
60 if not data:
60 if not data:
61 return
61 return
62 # single write() to guarantee the same atomicity as the underlying file
62 # single write() to guarantee the same atomicity as the underlying file
63 self.out.write(struct.pack(b'>cI', self.channel, len(data)) + data)
63 self.out.write(struct.pack(b'>cI', self.channel, len(data)) + data)
64 self.out.flush()
64 self.out.flush()
65
65
66 def __getattr__(self, attr):
66 def __getattr__(self, attr):
67 if attr in ('isatty', 'fileno', 'tell', 'seek'):
67 if attr in ('isatty', 'fileno', 'tell', 'seek'):
68 raise AttributeError(attr)
68 raise AttributeError(attr)
69 return getattr(self.out, attr)
69 return getattr(self.out, attr)
70
70
71
71
72 class channeledmessage(object):
72 class channeledmessage(object):
73 """
73 """
74 Write encoded message and metadata to out in the following format:
74 Write encoded message and metadata to out in the following format:
75
75
76 data length (unsigned int),
76 data length (unsigned int),
77 encoded message and metadata, as a flat key-value dict.
77 encoded message and metadata, as a flat key-value dict.
78
78
79 Each message should have 'type' attribute. Messages of unknown type
79 Each message should have 'type' attribute. Messages of unknown type
80 should be ignored.
80 should be ignored.
81 """
81 """
82
82
83 # teach ui that write() can take **opts
83 # teach ui that write() can take **opts
84 structured = True
84 structured = True
85
85
86 def __init__(self, out, channel, encodename, encodefn):
86 def __init__(self, out, channel, encodename, encodefn):
87 self._cout = channeledoutput(out, channel)
87 self._cout = channeledoutput(out, channel)
88 self.encoding = encodename
88 self.encoding = encodename
89 self._encodefn = encodefn
89 self._encodefn = encodefn
90
90
91 def write(self, data, **opts):
91 def write(self, data, **opts):
92 opts = pycompat.byteskwargs(opts)
92 opts = pycompat.byteskwargs(opts)
93 if data is not None:
93 if data is not None:
94 opts[b'data'] = data
94 opts[b'data'] = data
95 self._cout.write(self._encodefn(opts))
95 self._cout.write(self._encodefn(opts))
96
96
97 def __getattr__(self, attr):
97 def __getattr__(self, attr):
98 return getattr(self._cout, attr)
98 return getattr(self._cout, attr)
99
99
100
100
101 class channeledinput(object):
101 class channeledinput(object):
102 """
102 """
103 Read data from in_.
103 Read data from in_.
104
104
105 Requests for input are written to out in the following format:
105 Requests for input are written to out in the following format:
106 channel identifier - 'I' for plain input, 'L' line based (1 byte)
106 channel identifier - 'I' for plain input, 'L' line based (1 byte)
107 how many bytes to send at most (unsigned int),
107 how many bytes to send at most (unsigned int),
108
108
109 The client replies with:
109 The client replies with:
110 data length (unsigned int), 0 meaning EOF
110 data length (unsigned int), 0 meaning EOF
111 data
111 data
112 """
112 """
113
113
114 maxchunksize = 4 * 1024
114 maxchunksize = 4 * 1024
115
115
116 def __init__(self, in_, out, channel):
116 def __init__(self, in_, out, channel):
117 self.in_ = in_
117 self.in_ = in_
118 self.out = out
118 self.out = out
119 self.channel = channel
119 self.channel = channel
120
120
121 @property
121 @property
122 def name(self):
122 def name(self):
123 return b'<%c-channel>' % self.channel
123 return b'<%c-channel>' % self.channel
124
124
125 def read(self, size=-1):
125 def read(self, size=-1):
126 if size < 0:
126 if size < 0:
127 # if we need to consume all the clients input, ask for 4k chunks
127 # if we need to consume all the clients input, ask for 4k chunks
128 # so the pipe doesn't fill up risking a deadlock
128 # so the pipe doesn't fill up risking a deadlock
129 size = self.maxchunksize
129 size = self.maxchunksize
130 s = self._read(size, self.channel)
130 s = self._read(size, self.channel)
131 buf = s
131 buf = s
132 while s:
132 while s:
133 s = self._read(size, self.channel)
133 s = self._read(size, self.channel)
134 buf += s
134 buf += s
135
135
136 return buf
136 return buf
137 else:
137 else:
138 return self._read(size, self.channel)
138 return self._read(size, self.channel)
139
139
140 def _read(self, size, channel):
140 def _read(self, size, channel):
141 if not size:
141 if not size:
142 return b''
142 return b''
143 assert size > 0
143 assert size > 0
144
144
145 # tell the client we need at most size bytes
145 # tell the client we need at most size bytes
146 self.out.write(struct.pack(b'>cI', channel, size))
146 self.out.write(struct.pack(b'>cI', channel, size))
147 self.out.flush()
147 self.out.flush()
148
148
149 length = self.in_.read(4)
149 length = self.in_.read(4)
150 length = struct.unpack(b'>I', length)[0]
150 length = struct.unpack(b'>I', length)[0]
151 if not length:
151 if not length:
152 return b''
152 return b''
153 else:
153 else:
154 return self.in_.read(length)
154 return self.in_.read(length)
155
155
156 def readline(self, size=-1):
156 def readline(self, size=-1):
157 if size < 0:
157 if size < 0:
158 size = self.maxchunksize
158 size = self.maxchunksize
159 s = self._read(size, b'L')
159 s = self._read(size, b'L')
160 buf = s
160 buf = s
161 # keep asking for more until there's either no more or
161 # keep asking for more until there's either no more or
162 # we got a full line
162 # we got a full line
163 while s and not s.endswith(b'\n'):
163 while s and not s.endswith(b'\n'):
164 s = self._read(size, b'L')
164 s = self._read(size, b'L')
165 buf += s
165 buf += s
166
166
167 return buf
167 return buf
168 else:
168 else:
169 return self._read(size, b'L')
169 return self._read(size, b'L')
170
170
171 def __iter__(self):
171 def __iter__(self):
172 return self
172 return self
173
173
174 def next(self):
174 def next(self):
175 l = self.readline()
175 l = self.readline()
176 if not l:
176 if not l:
177 raise StopIteration
177 raise StopIteration
178 return l
178 return l
179
179
180 __next__ = next
180 __next__ = next
181
181
182 def __getattr__(self, attr):
182 def __getattr__(self, attr):
183 if attr in ('isatty', 'fileno', 'tell', 'seek'):
183 if attr in ('isatty', 'fileno', 'tell', 'seek'):
184 raise AttributeError(attr)
184 raise AttributeError(attr)
185 return getattr(self.in_, attr)
185 return getattr(self.in_, attr)
186
186
187
187
188 _messageencoders = {
188 _messageencoders = {
189 b'cbor': lambda v: b''.join(cborutil.streamencode(v)),
189 b'cbor': lambda v: b''.join(cborutil.streamencode(v)),
190 }
190 }
191
191
192
192
193 def _selectmessageencoder(ui):
193 def _selectmessageencoder(ui):
194 encnames = ui.configlist(b'cmdserver', b'message-encodings')
194 encnames = ui.configlist(b'cmdserver', b'message-encodings')
195 for n in encnames:
195 for n in encnames:
196 f = _messageencoders.get(n)
196 f = _messageencoders.get(n)
197 if f:
197 if f:
198 return n, f
198 return n, f
199 raise error.Abort(
199 raise error.Abort(
200 b'no supported message encodings: %s' % b' '.join(encnames)
200 b'no supported message encodings: %s' % b' '.join(encnames)
201 )
201 )
202
202
203
203
204 class server(object):
204 class server(object):
205 """
205 """
206 Listens for commands on fin, runs them and writes the output on a channel
206 Listens for commands on fin, runs them and writes the output on a channel
207 based stream to fout.
207 based stream to fout.
208 """
208 """
209
209
210 def __init__(self, ui, repo, fin, fout, prereposetups=None):
210 def __init__(self, ui, repo, fin, fout, prereposetups=None):
211 self.cwd = encoding.getcwd()
211 self.cwd = encoding.getcwd()
212
212
213 if repo:
213 if repo:
214 # the ui here is really the repo ui so take its baseui so we don't
214 # the ui here is really the repo ui so take its baseui so we don't
215 # end up with its local configuration
215 # end up with its local configuration
216 self.ui = repo.baseui
216 self.ui = repo.baseui
217 self.repo = repo
217 self.repo = repo
218 self.repoui = repo.ui
218 self.repoui = repo.ui
219 else:
219 else:
220 self.ui = ui
220 self.ui = ui
221 self.repo = self.repoui = None
221 self.repo = self.repoui = None
222 self._prereposetups = prereposetups
222 self._prereposetups = prereposetups
223
223
224 self.cdebug = channeledoutput(fout, b'd')
224 self.cdebug = channeledoutput(fout, b'd')
225 self.cerr = channeledoutput(fout, b'e')
225 self.cerr = channeledoutput(fout, b'e')
226 self.cout = channeledoutput(fout, b'o')
226 self.cout = channeledoutput(fout, b'o')
227 self.cin = channeledinput(fin, fout, b'I')
227 self.cin = channeledinput(fin, fout, b'I')
228 self.cresult = channeledoutput(fout, b'r')
228 self.cresult = channeledoutput(fout, b'r')
229
229
230 if self.ui.config(b'cmdserver', b'log') == b'-':
230 if self.ui.config(b'cmdserver', b'log') == b'-':
231 # switch log stream of server's ui to the 'd' (debug) channel
231 # switch log stream of server's ui to the 'd' (debug) channel
232 # (don't touch repo.ui as its lifetime is longer than the server)
232 # (don't touch repo.ui as its lifetime is longer than the server)
233 self.ui = self.ui.copy()
233 self.ui = self.ui.copy()
234 setuplogging(self.ui, repo=None, fp=self.cdebug)
234 setuplogging(self.ui, repo=None, fp=self.cdebug)
235
235
236 self.cmsg = None
236 self.cmsg = None
237 if ui.config(b'ui', b'message-output') == b'channel':
237 if ui.config(b'ui', b'message-output') == b'channel':
238 encname, encfn = _selectmessageencoder(ui)
238 encname, encfn = _selectmessageencoder(ui)
239 self.cmsg = channeledmessage(fout, b'm', encname, encfn)
239 self.cmsg = channeledmessage(fout, b'm', encname, encfn)
240
240
241 self.client = fin
241 self.client = fin
242
242
243 # If shutdown-on-interrupt is off, the default SIGINT handler is
243 # If shutdown-on-interrupt is off, the default SIGINT handler is
244 # removed so that client-server communication wouldn't be interrupted.
244 # removed so that client-server communication wouldn't be interrupted.
245 # For example, 'runcommand' handler will issue three short read()s.
245 # For example, 'runcommand' handler will issue three short read()s.
246 # If one of the first two read()s were interrupted, the communication
246 # If one of the first two read()s were interrupted, the communication
247 # channel would be left at dirty state and the subsequent request
247 # channel would be left at dirty state and the subsequent request
248 # wouldn't be parsed. So catching KeyboardInterrupt isn't enough.
248 # wouldn't be parsed. So catching KeyboardInterrupt isn't enough.
249 self._shutdown_on_interrupt = ui.configbool(
249 self._shutdown_on_interrupt = ui.configbool(
250 b'cmdserver', b'shutdown-on-interrupt'
250 b'cmdserver', b'shutdown-on-interrupt'
251 )
251 )
252 self._old_inthandler = None
252 self._old_inthandler = None
253 if not self._shutdown_on_interrupt:
253 if not self._shutdown_on_interrupt:
254 self._old_inthandler = signal.signal(signal.SIGINT, signal.SIG_IGN)
254 self._old_inthandler = signal.signal(signal.SIGINT, signal.SIG_IGN)
255
255
256 def cleanup(self):
256 def cleanup(self):
257 """release and restore resources taken during server session"""
257 """release and restore resources taken during server session"""
258 if not self._shutdown_on_interrupt:
258 if not self._shutdown_on_interrupt:
259 signal.signal(signal.SIGINT, self._old_inthandler)
259 signal.signal(signal.SIGINT, self._old_inthandler)
260
260
261 def _read(self, size):
261 def _read(self, size):
262 if not size:
262 if not size:
263 return b''
263 return b''
264
264
265 data = self.client.read(size)
265 data = self.client.read(size)
266
266
267 # is the other end closed?
267 # is the other end closed?
268 if not data:
268 if not data:
269 raise EOFError
269 raise EOFError
270
270
271 return data
271 return data
272
272
273 def _readstr(self):
273 def _readstr(self):
274 """read a string from the channel
274 """read a string from the channel
275
275
276 format:
276 format:
277 data length (uint32), data
277 data length (uint32), data
278 """
278 """
279 length = struct.unpack(b'>I', self._read(4))[0]
279 length = struct.unpack(b'>I', self._read(4))[0]
280 if not length:
280 if not length:
281 return b''
281 return b''
282 return self._read(length)
282 return self._read(length)
283
283
284 def _readlist(self):
284 def _readlist(self):
285 """read a list of NULL separated strings from the channel"""
285 """read a list of NULL separated strings from the channel"""
286 s = self._readstr()
286 s = self._readstr()
287 if s:
287 if s:
288 return s.split(b'\0')
288 return s.split(b'\0')
289 else:
289 else:
290 return []
290 return []
291
291
292 def _dispatchcommand(self, req):
292 def _dispatchcommand(self, req):
293 from . import dispatch # avoid cycle
293 from . import dispatch # avoid cycle
294
294
295 if self._shutdown_on_interrupt:
295 if self._shutdown_on_interrupt:
296 # no need to restore SIGINT handler as it is unmodified.
296 # no need to restore SIGINT handler as it is unmodified.
297 return dispatch.dispatch(req)
297 return dispatch.dispatch(req)
298
298
299 try:
299 try:
300 signal.signal(signal.SIGINT, self._old_inthandler)
300 signal.signal(signal.SIGINT, self._old_inthandler)
301 return dispatch.dispatch(req)
301 return dispatch.dispatch(req)
302 except error.SignalInterrupt:
302 except error.SignalInterrupt:
303 # propagate SIGBREAK, SIGHUP, or SIGTERM.
303 # propagate SIGBREAK, SIGHUP, or SIGTERM.
304 raise
304 raise
305 except KeyboardInterrupt:
305 except KeyboardInterrupt:
306 # SIGINT may be received out of the try-except block of dispatch(),
306 # SIGINT may be received out of the try-except block of dispatch(),
307 # so catch it as last ditch. Another KeyboardInterrupt may be
307 # so catch it as last ditch. Another KeyboardInterrupt may be
308 # raised while handling exceptions here, but there's no way to
308 # raised while handling exceptions here, but there's no way to
309 # avoid that except for doing everything in C.
309 # avoid that except for doing everything in C.
310 pass
310 pass
311 finally:
311 finally:
312 signal.signal(signal.SIGINT, signal.SIG_IGN)
312 signal.signal(signal.SIGINT, signal.SIG_IGN)
313 # On KeyboardInterrupt, print error message and exit *after* SIGINT
313 # On KeyboardInterrupt, print error message and exit *after* SIGINT
314 # handler removed.
314 # handler removed.
315 req.ui.error(_(b'interrupted!\n'))
315 req.ui.error(_(b'interrupted!\n'))
316 return -1
316 return -1
317
317
    def runcommand(self):
        """Read a list of \\0-terminated arguments, execute them as one
        command, and write the return code to the result channel.

        Each request gets fresh copies of the ui objects so that per-request
        options (e.g. --config, --verbose) cannot leak into later requests.
        """
        from . import dispatch  # avoid cycle

        args = self._readlist()

        # copy the uis so changes (e.g. --config or --verbose) don't
        # persist between requests
        copiedui = self.ui.copy()
        uis = [copiedui]
        if self.repo:
            self.repo.baseui = copiedui
            # clone ui without using ui.copy because this is protected
            repoui = self.repoui.__class__(self.repoui)
            repoui.copy = copiedui.copy  # redo copy protection
            uis.append(repoui)
            self.repo.ui = self.repo.dirstate._ui = repoui
            # drop caches so the request observes the on-disk state
            self.repo.invalidateall()

        for ui in uis:
            ui.resetstate()
            # any kind of interaction must use server channels, but chg may
            # replace channels by fully functional tty files. so nontty is
            # enforced only if cin is a channel.
            if not util.safehasattr(self.cin, b'fileno'):
                ui.setconfig(b'ui', b'nontty', b'true', b'commandserver')

        req = dispatch.request(
            args[:],
            copiedui,
            self.repo,
            self.cin,
            self.cout,
            self.cerr,
            self.cmsg,
            prereposetups=self._prereposetups,
        )

        try:
            # mask to a byte-sized exit status, as a shell would report
            ret = self._dispatchcommand(req) & 255
            # If shutdown-on-interrupt is off, it's important to write the
            # result code *after* SIGINT handler removed. If the result code
            # were lost, the client wouldn't be able to continue processing.
            self.cresult.write(struct.pack(b'>i', int(ret)))
        finally:
            # restore old cwd
            if b'--cwd' in args:
                os.chdir(self.cwd)
367
367
    def getencoding(self):
        """Write the server's current encoding name to the result channel."""
        self.cresult.write(encoding.encoding)
371
371
372 def serveone(self):
372 def serveone(self):
373 cmd = self.client.readline()[:-1]
373 cmd = self.client.readline()[:-1]
374 if cmd:
374 if cmd:
375 handler = self.capabilities.get(cmd)
375 handler = self.capabilities.get(cmd)
376 if handler:
376 if handler:
377 handler(self)
377 handler(self)
378 else:
378 else:
379 # clients are expected to check what commands are supported by
379 # clients are expected to check what commands are supported by
380 # looking at the servers capabilities
380 # looking at the servers capabilities
381 raise error.Abort(_(b'unknown command %s') % cmd)
381 raise error.Abort(_(b'unknown command %s') % cmd)
382
382
383 return cmd != b''
383 return cmd != b''
384
384
    # wire command name -> handler method; advertised to clients via the
    # 'capabilities:' line of the hello message emitted by serve()
    capabilities = {b'runcommand': runcommand, b'getencoding': getencoding}
386
386
387 def serve(self):
387 def serve(self):
388 hellomsg = b'capabilities: ' + b' '.join(sorted(self.capabilities))
388 hellomsg = b'capabilities: ' + b' '.join(sorted(self.capabilities))
389 hellomsg += b'\n'
389 hellomsg += b'\n'
390 hellomsg += b'encoding: ' + encoding.encoding
390 hellomsg += b'encoding: ' + encoding.encoding
391 hellomsg += b'\n'
391 hellomsg += b'\n'
392 if self.cmsg:
392 if self.cmsg:
393 hellomsg += b'message-encoding: %s\n' % self.cmsg.encoding
393 hellomsg += b'message-encoding: %s\n' % self.cmsg.encoding
394 hellomsg += b'pid: %d' % procutil.getpid()
394 hellomsg += b'pid: %d' % procutil.getpid()
395 if util.safehasattr(os, b'getpgid'):
395 if util.safehasattr(os, b'getpgid'):
396 hellomsg += b'\n'
396 hellomsg += b'\n'
397 hellomsg += b'pgid: %d' % os.getpgid(0)
397 hellomsg += b'pgid: %d' % os.getpgid(0)
398
398
399 # write the hello msg in -one- chunk
399 # write the hello msg in -one- chunk
400 self.cout.write(hellomsg)
400 self.cout.write(hellomsg)
401
401
402 try:
402 try:
403 while self.serveone():
403 while self.serveone():
404 pass
404 pass
405 except EOFError:
405 except EOFError:
406 # we'll get here if the client disconnected while we were reading
406 # we'll get here if the client disconnected while we were reading
407 # its request
407 # its request
408 return 1
408 return 1
409
409
410 return 0
410 return 0
411
411
412
412
def setuplogging(ui, repo=None, fp=None):
    """Set up server logging facility

    If cmdserver.log is '-', log messages will be sent to the given fp.
    It should be the 'd' channel while a client is connected, and otherwise
    is the stderr of the server process.
    """
    # developer config: cmdserver.log
    logdest = ui.config(b'cmdserver', b'log')
    if not logdest:
        return
    # developer config: cmdserver.track-log
    trackedevents = set(ui.configlist(b'cmdserver', b'track-log'))

    if logdest == b'-':
        # stream to the client's 'd' channel if available, else our stderr
        stream = fp if fp else ui.ferr
        logger = loggingutil.fileobjectlogger(stream, trackedevents)
    else:
        logdest = os.path.abspath(util.expandpath(logdest))
        # developer config: cmdserver.max-log-files
        maxfiles = ui.configint(b'cmdserver', b'max-log-files')
        # developer config: cmdserver.max-log-size
        maxsize = ui.configbytes(b'cmdserver', b'max-log-size')
        logvfs = vfsmod.vfs(os.path.dirname(logdest))
        logger = loggingutil.filelogger(
            logvfs,
            os.path.basename(logdest),
            trackedevents,
            maxfiles=maxfiles,
            maxsize=maxsize,
        )

    # install the logger on every ui that may emit events for this server
    targets = {ui}
    if repo:
        targets.add(repo.baseui)
        targets.add(repo.ui)
    for targetui in targets:
        targetui.setlogger(b'cmdserver', logger)
452
452
453
453
class pipeservice(object):
    """Command server that speaks the channel protocol over this process's
    own stdin/stdout streams."""

    def __init__(self, ui, repo, opts):
        self.ui = ui
        self.repo = repo

    def init(self):
        """Nothing to set up in pipe mode; present to satisfy the service
        interface."""

    def run(self):
        ui = self.ui
        # redirect stdio to null device so that broken extensions or in-process
        # hooks will never cause corruption of channel protocol.
        with ui.protectedfinout() as (fin, fout):
            srv = server(ui, self.repo, fin, fout)
            try:
                return srv.serve()
            finally:
                srv.cleanup()
472
472
473
473
def _initworkerprocess():
    """Prepare a freshly forked worker process to serve one connection."""
    # use a different process group from the master process, in order to:
    # 1. make the current process group no longer "orphaned" (because the
    #    parent of this process is in a different process group while
    #    remains in a same session)
    #    according to POSIX 2.2.2.52, orphaned process group will ignore
    #    terminal-generated stop signals like SIGTSTP (Ctrl+Z), which will
    #    cause trouble for things like ncurses.
    # 2. the client can use kill(-pgid, sig) to simulate terminal-generated
    #    SIGINT (Ctrl+C) and process-exit-generated SIGHUP. our child
    #    processes like ssh will be killed properly, without affecting
    #    unrelated processes.
    os.setpgid(0, 0)
    # change random state otherwise forked request handlers would have a
    # same state inherited from parent.
    random.seed()
490
490
491
491
def _serverequest(ui, repo, conn, createcmdserver, prereposetups):
    """Serve one client connection on the given socket.

    Builds a command server via createcmdserver() and runs it to completion,
    translating known failures into messages on the server's error channel.
    Unexpected exceptions are re-raised after their traceback has been
    forwarded to the client.
    """
    fin = conn.makefile('rb')
    fout = conn.makefile('wb')
    sv = None
    try:
        sv = createcmdserver(repo, conn, fin, fout, prereposetups)
        try:
            sv.serve()
        # handle exceptions that may be raised by command server. most of
        # known exceptions are caught by dispatch.
        except error.Abort as inst:
            ui.error(_(b'abort: %s\n') % inst.message)
        except IOError as inst:
            # EPIPE just means the client went away; anything else is real
            if inst.errno != errno.EPIPE:
                raise
        except KeyboardInterrupt:
            pass
        finally:
            sv.cleanup()
    except:  # re-raises
        # also write traceback to error channel. otherwise client cannot
        # see it because it is written to server's stderr by default.
        if sv:
            cerr = sv.cerr
        else:
            cerr = channeledoutput(fout, b'e')
        cerr.write(encoding.strtolocal(traceback.format_exc()))
        raise
    finally:
        fin.close()
        try:
            fout.close()  # implicit flush() may cause another EPIPE
        except IOError as inst:
            if inst.errno != errno.EPIPE:
                raise
527
527
528
528
class unixservicehandler(object):
    """Set of pluggable operations for unix-mode services

    Almost all methods except for createcmdserver() are called in the main
    process. You can't pass mutable resource back from createcmdserver().
    """

    # seconds between shouldexit() polls; None means block in select()
    pollinterval = None

    def __init__(self, ui):
        self.ui = ui

    def bindsocket(self, sock, address):
        """Bind the listening socket to the unix domain address and start
        accepting connections."""
        util.bindunixsocket(sock, address)
        sock.listen(socket.SOMAXCONN)
        ui = self.ui
        ui.status(_(b'listening at %s\n') % address)
        ui.flush()  # avoid buffering of status message

    def unlinksocket(self, address):
        """Remove the socket file at shutdown."""
        os.unlink(address)

    def shouldexit(self):
        """True if server should shut down; checked per pollinterval"""
        return False

    def newconnection(self):
        """Called when main process notices new connection"""

    def createcmdserver(self, repo, conn, fin, fout, prereposetups):
        """Create new command server instance; called in the process that
        serves for the current connection"""
        return server(self.ui, repo, fin, fout, prereposetups)
561
561
562
562
class unixforkingservice(object):
    """
    Listens on unix domain socket and forks server per connection
    """

    def __init__(self, ui, repo, opts, handler=None):
        """Validate options and prepare (but do not open) server resources.

        Raises error.Abort when the platform lacks AF_UNIX, when no socket
        path was given, or when the repo-cache size is negative.
        """
        self.ui = ui
        self.repo = repo
        self.address = opts[b'address']
        if not util.safehasattr(socket, b'AF_UNIX'):
            raise error.Abort(_(b'unsupported platform'))
        if not self.address:
            raise error.Abort(_(b'no socket path specified with --address'))
        self._servicehandler = handler or unixservicehandler(ui)
        self._sock = None
        self._mainipc = None
        self._workeripc = None
        self._oldsigchldhandler = None
        self._workerpids = set()  # updated by signal handler; do not iterate
        self._socketunlinked = None
        # experimental config: cmdserver.max-repo-cache
        maxlen = ui.configint(b'cmdserver', b'max-repo-cache')
        if maxlen < 0:
            raise error.Abort(_(b'negative max-repo-cache size not allowed'))
        self._repoloader = repocache.repoloader(ui, maxlen)
        # attempt to avoid crash in CoreFoundation when using chg after fix in
        # a89381e04c58
        if pycompat.isdarwin:
            procutil.gui()

    def init(self):
        """Open the listening socket, the worker IPC pair, install the
        SIGCHLD handler and start the repo preloader."""
        self._sock = socket.socket(socket.AF_UNIX)
        # IPC channel from many workers to one main process; this is actually
        # a uni-directional pipe, but is backed by a DGRAM socket so each
        # message can be easily separated.
        o = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
        self._mainipc, self._workeripc = o
        self._servicehandler.bindsocket(self._sock, self.address)
        if util.safehasattr(procutil, b'unblocksignal'):
            procutil.unblocksignal(signal.SIGCHLD)
        o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
        self._oldsigchldhandler = o
        self._socketunlinked = False
        self._repoloader.start()

    def _unlinksocket(self):
        # idempotent: safe to call from both _mainloop and _cleanup
        if not self._socketunlinked:
            self._servicehandler.unlinksocket(self.address)
            self._socketunlinked = True

    def _cleanup(self):
        """Undo init(): restore SIGCHLD, close sockets, stop the preloader
        and wait for remaining workers."""
        signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
        self._sock.close()
        self._mainipc.close()
        self._workeripc.close()
        self._unlinksocket()
        self._repoloader.stop()
        # don't kill child processes as they have active clients, just wait
        self._reapworkers(0)

    def run(self):
        """Run the accept loop until the service handler asks to exit."""
        try:
            self._mainloop()
        finally:
            self._cleanup()

    def _mainloop(self):
        """Multiplex the listening socket and the worker IPC channel,
        dispatching each readiness event to its registered callback."""
        exiting = False
        h = self._servicehandler
        selector = selectors.DefaultSelector()
        selector.register(
            self._sock, selectors.EVENT_READ, self._acceptnewconnection
        )
        selector.register(
            self._mainipc, selectors.EVENT_READ, self._handlemainipc
        )
        while True:
            if not exiting and h.shouldexit():
                # clients can no longer connect() to the domain socket, so
                # we stop queuing new requests.
                # for requests that are queued (connect()-ed, but haven't been
                # accept()-ed), handle them before exit. otherwise, clients
                # waiting for recv() will receive ECONNRESET.
                self._unlinksocket()
                exiting = True
            try:
                events = selector.select(timeout=h.pollinterval)
            except OSError as inst:
                # selectors2 raises ETIMEDOUT if timeout exceeded while
                # handling signal interrupt. That's probably wrong, but
                # we can easily get around it.
                if inst.errno != errno.ETIMEDOUT:
                    raise
                events = []
            if not events:
                # only exit if we completed all queued requests
                if exiting:
                    break
                continue
            for key, _mask in events:
                key.data(key.fileobj, selector)
        selector.close()

    def _acceptnewconnection(self, sock, selector):
        """Accept one queued connection and fork a worker to serve it.

        The parent only records the child pid; the child closes the
        listening resources, serves the request and exits via os._exit so
        no parent state unwinds twice.
        """
        h = self._servicehandler
        try:
            conn, _addr = sock.accept()
        except socket.error as inst:
            if inst.args[0] == errno.EINTR:
                return
            raise

        # Future improvement: On Python 3.7, maybe gc.freeze() can be used
        # to prevent COW memory from being touched by GC.
        # https://instagram-engineering.com/
        # copy-on-write-friendly-python-garbage-collection-ad6ed5233ddf
        pid = os.fork()
        if pid:
            try:
                self.ui.log(
                    b'cmdserver', b'forked worker process (pid=%d)\n', pid
                )
                self._workerpids.add(pid)
                h.newconnection()
            finally:
                conn.close()  # release handle in parent process
        else:
            try:
                selector.close()
                sock.close()
                self._mainipc.close()
                self._runworker(conn)
                conn.close()
                self._workeripc.close()
                os._exit(0)
            except:  # never return, hence no re-raises
                try:
                    self.ui.traceback(force=True)
                finally:
                    os._exit(255)

    def _handlemainipc(self, sock, selector):
        """Process messages sent from a worker"""
        try:
            path = sock.recv(32768)  # large enough to receive path
        except socket.error as inst:
            if inst.args[0] == errno.EINTR:
                return
            raise
        self._repoloader.load(path)

    def _sigchldhandler(self, signal, frame):
        # non-blocking reap; full wait happens in _cleanup()
        self._reapworkers(os.WNOHANG)

    def _reapworkers(self, options):
        """Collect exited worker processes; *options* is passed to
        os.waitpid (0 to block, os.WNOHANG to poll)."""
        while self._workerpids:
            try:
                pid, _status = os.waitpid(-1, options)
            except OSError as inst:
                if inst.errno == errno.EINTR:
                    continue
                if inst.errno != errno.ECHILD:
                    raise
                # no child processes at all (reaped by other waitpid()?)
                self._workerpids.clear()
                return
            if pid == 0:
                # no waitable child processes
                return
            self.ui.log(b'cmdserver', b'worker process exited (pid=%d)\n', pid)
            self._workerpids.discard(pid)

    def _runworker(self, conn):
        """Serve one connection in the freshly forked child process."""
        # the child must not inherit the parent's SIGCHLD bookkeeping
        signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
        _initworkerprocess()
        h = self._servicehandler
        try:
            _serverequest(
                self.ui,
                self.repo,
                conn,
                h.createcmdserver,
                prereposetups=[self._reposetup],
            )
        finally:
            gc.collect()  # trigger __del__ since worker process uses os._exit

    def _reposetup(self, ui, repo):
        """Hook run before a repo is handed to dispatch: wire the repo to
        the worker IPC channel and seed it from the preloaded cache."""
        if not repo.local():
            return

        class unixcmdserverrepo(repo.__class__):
            def close(self):
                super(unixcmdserverrepo, self).close()
                try:
                    # tell the master which repo to keep warm in its cache
                    self._cmdserveripc.send(self.root)
                except socket.error:
                    self.ui.log(
                        b'cmdserver', b'failed to send repo root to master\n'
                    )

        repo.__class__ = unixcmdserverrepo
        repo._cmdserveripc = self._workeripc

        cachedrepo = self._repoloader.get(repo.root)
        if cachedrepo is None:
            return
        repo.ui.log(b'repocache', b'repo from cache: %s\n', repo.root)
        repocache.copycache(cachedrepo, repo)
@@ -1,2034 +1,2034
1 # stuff related specifically to patch manipulation / parsing
1 # stuff related specifically to patch manipulation / parsing
2 #
2 #
3 # Copyright 2008 Mark Edgington <edgimar@gmail.com>
3 # Copyright 2008 Mark Edgington <edgimar@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 #
7 #
8 # This code is based on the Mark Edgington's crecord extension.
8 # This code is based on the Mark Edgington's crecord extension.
9 # (Itself based on Bryan O'Sullivan's record extension.)
9 # (Itself based on Bryan O'Sullivan's record extension.)
10
10
11 from __future__ import absolute_import
11 from __future__ import absolute_import
12
12
13 import os
13 import os
14 import re
14 import re
15 import signal
15 import signal
16
16
17 from .i18n import _
17 from .i18n import _
18 from .pycompat import (
18 from .pycompat import (
19 getattr,
19 getattr,
20 open,
20 open,
21 )
21 )
22 from . import (
22 from . import (
23 diffhelper,
23 diffhelper,
24 encoding,
24 encoding,
25 error,
25 error,
26 patch as patchmod,
26 patch as patchmod,
27 pycompat,
27 pycompat,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 )
30 )
31 from .utils import stringutil
31 from .utils import stringutil
32
32
33 stringio = util.stringio
33 stringio = util.stringio
34
34
35 # patch comments based on the git one
35 # patch comments based on the git one
36 diffhelptext = _(
36 diffhelptext = _(
37 """# To remove '-' lines, make them ' ' lines (context).
37 """# To remove '-' lines, make them ' ' lines (context).
38 # To remove '+' lines, delete them.
38 # To remove '+' lines, delete them.
39 # Lines starting with # will be removed from the patch.
39 # Lines starting with # will be removed from the patch.
40 """
40 """
41 )
41 )
42
42
43 hunkhelptext = _(
43 hunkhelptext = _(
44 """#
44 """#
45 # If the patch applies cleanly, the edited hunk will immediately be
45 # If the patch applies cleanly, the edited hunk will immediately be
46 # added to the record list. If it does not apply cleanly, a rejects file
46 # added to the record list. If it does not apply cleanly, a rejects file
47 # will be generated. You can use that when you try again. If all lines
47 # will be generated. You can use that when you try again. If all lines
48 # of the hunk are removed, then the edit is aborted and the hunk is left
48 # of the hunk are removed, then the edit is aborted and the hunk is left
49 # unchanged.
49 # unchanged.
50 """
50 """
51 )
51 )
52
52
53 patchhelptext = _(
53 patchhelptext = _(
54 """#
54 """#
55 # If the patch applies cleanly, the edited patch will immediately
55 # If the patch applies cleanly, the edited patch will immediately
56 # be finalised. If it does not apply cleanly, rejects files will be
56 # be finalised. If it does not apply cleanly, rejects files will be
57 # generated. You can use those when you try again.
57 # generated. You can use those when you try again.
58 """
58 """
59 )
59 )
60
60
61 try:
61 try:
62 import curses
62 import curses
63 import curses.ascii
63 import curses.ascii
64
64
65 curses.error
65 curses.error
66 except (ImportError, AttributeError):
66 except (ImportError, AttributeError):
67 curses = False
67 curses = False
68
68
69
69
class fallbackerror(error.Abort):
    """Raised when the client should fall back to the text interface.

    Subclasses error.Abort so that callers unaware of the fallback
    protocol keep the existing abort behavior.
    """
75
75
76
76
def checkcurses(ui):
    """Return True if the user wants to use curses

    This method returns True if curses is found (and that python is built with
    it) and that the user has the correct flag for the ui.
    """
    if not curses:
        # curses failed to import (or python lacks it); preserve the
        # original short-circuit result.
        return curses
    return ui.interface(b"chunkselector") == b"curses"
84
84
85
85
class patchnode(object):
    """abstract class for patch graph nodes
    (i.e. patchroot, header, hunk, hunkline)
    """

    def firstchild(self):
        raise NotImplementedError(b"method must be implemented by subclass")

    def lastchild(self):
        raise NotImplementedError(b"method must be implemented by subclass")

    def allchildren(self):
        """Return a list of all of the direct children of this node"""
        raise NotImplementedError(b"method must be implemented by subclass")

    def nextsibling(self):
        """
        Return the closest next item of the same type where there are no items
        of different types between the current item and this closest item.
        If no such item exists, return None.
        """
        raise NotImplementedError(b"method must be implemented by subclass")

    def prevsibling(self):
        """
        Return the closest previous item of the same type where there are no
        items of different types between the current item and this closest
        item.  If no such item exists, return None.
        """
        raise NotImplementedError(b"method must be implemented by subclass")

    def parentitem(self):
        raise NotImplementedError(b"method must be implemented by subclass")

    def nextitem(self, skipfolded=True):
        """
        Try to return the next item closest to this item, regardless of item's
        type (header, hunk, or hunkline).

        If skipfolded == True, and the current item is folded, then the child
        items that are hidden due to folding will be skipped when determining
        the next item.

        If it is not possible to get the next item, return None.
        """
        # nodes without a 'folded' attribute (e.g. the root) are treated
        # as unfolded
        isfolded = getattr(self, 'folded', False)

        if skipfolded and isfolded:
            # folded: do not descend into hidden children
            sibling = self.nextsibling()
            if sibling is not None:
                return sibling
            try:
                return self.parentitem().nextsibling()
            except AttributeError:
                # no parent
                return None

        # unfolded: depth-first -- child, then sibling, then the next
        # sibling of the parent (or grandparent)
        child = self.firstchild()
        if child is not None:
            return child

        sibling = self.nextsibling()
        if sibling is not None:
            return sibling

        try:
            parentsibling = self.parentitem().nextsibling()
            if parentsibling is not None:
                return parentsibling
            return self.parentitem().parentitem().nextsibling()
        except AttributeError:
            # parent and/or grandparent was None
            return None

    def previtem(self):
        """
        Try to return the previous item closest to this item, regardless of
        item's type (header, hunk, or hunkline).

        If it is not possible to get the previous item, return None.
        """
        # deepest visible descendant of the previous sibling wins,
        # falling back to the parent when there is no previous sibling
        prev = self.prevsibling()
        if prev is None:
            return self.parentitem()

        lastchild = prev.lastchild()
        if lastchild is None or prev.folded:
            return prev

        grandchild = lastchild.lastchild()
        if grandchild is not None and not lastchild.folded:
            return grandchild
        return lastchild
191
191
192
192
class patch(patchnode, list):  # todo: rename patchroot
    """
    list of header objects representing the patch.
    """

    def __init__(self, headerlist):
        self.extend(headerlist)
        # give every header a backreference to the patch that owns it
        for hdr in self:
            hdr.patch = self
203
203
204
204
class uiheader(patchnode):
    """patch header

    xxx shouldn't we move this to mercurial/patch.py ?
    """

    def __init__(self, header):
        self.nonuiheader = header
        # flag to indicate whether to apply this chunk
        self.applied = True
        # status-display-only flag: True when some (but not all) of this
        # node's children are applied
        self.partial = False

        # flag to indicate whether to display as folded/unfolded to user
        self.folded = True

        # list of all headers in patch
        self.patch = None

        # flag is False if this header was ever unfolded from initial state
        self.neverunfolded = True
        # wrap each underlying hunk in a ui-aware uihunk
        self.hunks = [uihunk(h, self) for h in self.hunks]

    def prettystr(self):
        buf = stringio()
        self.pretty(buf)
        return buf.getvalue()

    def nextsibling(self):
        numheaders = len(self.patch)
        idx = self.patch.index(self)

        if idx < numheaders - 1:
            return self.patch[idx + 1]
        return None

    def prevsibling(self):
        idx = self.patch.index(self)
        if idx > 0:
            return self.patch[idx - 1]
        return None

    def parentitem(self):
        """
        there is no 'real' parent item of a header that can be selected,
        so return None.
        """
        return None

    def firstchild(self):
        """return the first child of this item, if one exists. otherwise
        None."""
        return self.hunks[0] if self.hunks else None

    def lastchild(self):
        """return the last child of this item, if one exists. otherwise
        None."""
        return self.hunks[-1] if self.hunks else None

    def allchildren(self):
        """return a list of all of the direct children of this node"""
        return self.hunks

    def __getattr__(self, name):
        # everything not overridden here is delegated to the wrapped header
        return getattr(self.nonuiheader, name)
281
281
282
282
class uihunkline(patchnode):
    """represents a changed line in a hunk"""

    def __init__(self, linetext, hunk):
        self.linetext = linetext
        self.applied = True
        # the parent hunk to which this line belongs
        self.hunk = hunk
        # lines are never folded in practice, but previtem() consults
        # this flag, so it must exist
        self.folded = False

    def prettystr(self):
        return self.linetext

    def nextsibling(self):
        numlines = len(self.hunk.changedlines)
        idx = self.hunk.changedlines.index(self)

        if idx < numlines - 1:
            return self.hunk.changedlines[idx + 1]
        return None

    def prevsibling(self):
        idx = self.hunk.changedlines.index(self)
        if idx > 0:
            return self.hunk.changedlines[idx - 1]
        return None

    def parentitem(self):
        """return the parent to the current item"""
        return self.hunk

    def firstchild(self):
        """return the first child of this item, if one exists. otherwise
        None."""
        # hunk-lines don't have children
        return None

    def lastchild(self):
        """return the last child of this item, if one exists. otherwise
        None."""
        # hunk-lines don't have children
        return None
331
331
332
332
class uihunk(patchnode):
    """ui patch hunk, wraps a hunk and keep track of ui behavior """

    maxcontext = 3

    def __init__(self, hunk, header):
        self._hunk = hunk
        self.changedlines = [uihunkline(line, self) for line in hunk.hunk]
        self.header = header
        # remember the original removal count so we can later tell how
        # many removed lines were un-applied (turned back into context)
        self.originalremoved = self.removed

        # flag to indicate whether to display as folded/unfolded to user
        self.folded = True
        # flag to indicate whether to apply this chunk
        self.applied = True
        # status-display-only flag: True when some (but not all) of this
        # node's children are applied
        self.partial = False

    def nextsibling(self):
        numhunks = len(self.header.hunks)
        idx = self.header.hunks.index(self)

        if idx < numhunks - 1:
            return self.header.hunks[idx + 1]
        return None

    def prevsibling(self):
        idx = self.header.hunks.index(self)
        if idx > 0:
            return self.header.hunks[idx - 1]
        return None

    def parentitem(self):
        """return the parent to the current item"""
        return self.header

    def firstchild(self):
        """return the first child of this item, if one exists. otherwise
        None."""
        return self.changedlines[0] if self.changedlines else None

    def lastchild(self):
        """return the last child of this item, if one exists. otherwise
        None."""
        return self.changedlines[-1] if self.changedlines else None

    def allchildren(self):
        """return a list of all of the direct children of this node"""
        return self.changedlines

    def countchanges(self):
        """changedlines -> (n+,n-)"""
        add = sum(
            1
            for line in self.changedlines
            if line.applied and line.prettystr().startswith(b'+')
        )
        rem = sum(
            1
            for line in self.changedlines
            if line.applied and line.prettystr().startswith(b'-')
        )
        return add, rem

    def getfromtoline(self):
        # number of removed lines that were converted back to context
        removedconvertedtocontext = self.originalremoved - self.removed

        contextlen = (
            len(self.before) + len(self.after) + removedconvertedtocontext
        )
        if self.after and self.after[-1] == diffhelper.MISSING_NEWLINE_MARKER:
            contextlen -= 1
        fromlen = contextlen + self.removed
        tolen = contextlen + self.added

        # diffutils manual, section "2.2.2.2 detailed description of unified
        # format": "an empty hunk is considered to end at the line that
        # precedes the hunk."
        #
        # so, if either of hunks is empty, decrease its line start. --immerrr
        # but only do this if fromline > 0, to avoid having, e.g fromline=-1.
        fromline, toline = self.fromline, self.toline
        if fromline != 0:
            if fromlen == 0:
                fromline -= 1
            if tolen == 0 and toline > 0:
                toline -= 1

        return b'@@ -%d,%d +%d,%d @@%s\n' % (
            fromline,
            fromlen,
            toline,
            tolen,
            self.proc and (b' ' + self.proc),
        )

    def write(self, fp):
        # refresh self.added/self.removed, which getfromtoline() relies on
        self.added, self.removed = self.countchanges()
        fp.write(self.getfromtoline())

        # keep (1) every applied line and (2) every unapplied removal
        # line, the latter demoted to a context line
        hunklinelist = []
        for changedline in self.changedlines:
            changedlinestr = changedline.prettystr()
            if changedline.applied:
                hunklinelist.append(changedlinestr)
            elif changedlinestr.startswith(b"-"):
                hunklinelist.append(b" " + changedlinestr[1:])

        fp.write(b''.join(self.before + hunklinelist + self.after))

    pretty = write

    def prettystr(self):
        buf = stringio()
        self.pretty(buf)
        return buf.getvalue()

    def reversehunk(self):
        """return a recordhunk which is the reverse of the hunk

        Assuming the displayed patch is diff(A, B) result. The returned hunk is
        intended to be applied to B, instead of A.

        For example, when A is "0\n1\n2\n6\n" and B is "0\n3\n4\n5\n6\n", and
        the user made the following selection:

                 0
            [x] -1           [x]: selected
            [ ] -2           [ ]: not selected
            [x] +3
            [ ] +4
            [x] +5
                 6

        This function returns a hunk like:

                 0
                -3
                -4
                -5
                +1
                +4
                 6

        Note "4" was first deleted then added. That's because "4" exists in B
        side and "-4" must exist between "-3" and "-5" to make the patch
        applicable to B.
        """
        dels = []
        adds = []
        noeol = False
        for line in self.changedlines:
            text = line.linetext
            if line.linetext == diffhelper.MISSING_NEWLINE_MARKER:
                noeol = True
                break
            if line.applied:
                if text.startswith(b'+'):
                    dels.append(text[1:])
                elif text.startswith(b'-'):
                    adds.append(text[1:])
            elif text.startswith(b'+'):
                # unapplied addition: it exists on the B side, so it must
                # be removed and re-added to stay in place
                dels.append(text[1:])
                adds.append(text[1:])
        hunk = [b'-%s' % l for l in dels] + [b'+%s' % l for l in adds]
        if noeol and hunk:
            # Remove the newline from the end of the hunk.
            hunk[-1] = hunk[-1][:-1]
        h = self._hunk
        return patchmod.recordhunk(
            h.header, h.toline, h.fromline, h.proc, h.before, hunk, h.after
        )

    def __getattr__(self, name):
        # everything not overridden here is delegated to the wrapped hunk
        return getattr(self._hunk, name)

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
532
532
533
533
def filterpatch(ui, chunks, chunkselector, operation=None):
    """interactively filter patch chunks into applied-only chunks"""
    chunks = list(chunks)
    # build the curses-friendly structure: a list of headers only
    headers = [c for c in chunks if isinstance(c, patchmod.header)]

    if not headers:
        # there are no changed files
        return [], {}
    uiheaders = [uiheader(h) for h in headers]

    # let user choose headers/hunks/lines, and mark their applied flags
    # accordingly
    ret = chunkselector(ui, uiheaders, operation=operation)
    appliedhunklist = []
    for hdr in uiheaders:
        if not hdr.applied:
            continue
        # keep the header when it is special or has at least one applied hunk
        if not (hdr.special() or any(h.applied for h in hdr.hunks)):
            continue
        appliedhunklist.append(hdr)
        fixoffset = 0
        for hnk in hdr.hunks:
            if hnk.applied:
                appliedhunklist.append(hnk)
                # adjust the 'to'-line offset of the hunk to be correct
                # after de-activating some of the other hunks for this file
                if fixoffset:
                    # hnk = copy.copy(hnk) # necessary??
                    hnk.toline += fixoffset
            else:
                fixoffset += hnk.removed - hnk.added

    return (appliedhunklist, ret)
567
567
568
568
def chunkselector(ui, headerlist, operation=None):
    """
    curses interface to get selection of chunks, and mark the applied flags
    of the chosen chunks.
    """
    ui.write(_(b'starting interactive selection\n'))
    chunkselector = curseschunkselector(headerlist, ui, operation)
    # distinct sentinel object so we can tell "platform has no SIGTSTP"
    # apart from a saved handler that happens to be falsy
    origsigtstp = sentinel = object()
    # NOTE(review): the attribute name is passed as bytes; this relies on a
    # bytes-tolerant getattr shim inside util.safehasattr -- confirm
    if util.safehasattr(signal, b'SIGTSTP'):
        origsigtstp = signal.getsignal(signal.SIGTSTP)
    try:
        # curses needs LC_CTYPE from the user's locale to draw wide chars
        with util.with_lc_ctype():
            curses.wrapper(chunkselector.main)
        # an exception raised during curses setup is stashed on the
        # selector (it cannot propagate through curses.wrapper cleanly)
        if chunkselector.initexc is not None:
            raise chunkselector.initexc
        # ncurses does not restore signal handler for SIGTSTP
    finally:
        if origsigtstp is not sentinel:
            signal.signal(signal.SIGTSTP, origsigtstp)
    return chunkselector.opts
589
589
590
590
591 def testdecorator(testfn, f):
591 def testdecorator(testfn, f):
592 def u(*args, **kwargs):
592 def u(*args, **kwargs):
593 return f(testfn, *args, **kwargs)
593 return f(testfn, *args, **kwargs)
594
594
595 return u
595 return u
596
596
597
597
def testchunkselector(testfn, ui, headerlist, operation=None):
    """
    test interface to get selection of chunks, and mark the applied flags
    of the chosen chunks.

    Keypress commands are read one per line from the file named by
    *testfn* and fed to the selector until one of them ends the session.
    """
    chunkselector = curseschunkselector(headerlist, ui, operation)

    class dummystdscr(object):
        # minimal stand-in for the curses screen object: a scripted run
        # only ever calls these two methods
        def clear(self):
            pass

        def refresh(self):
            pass

    chunkselector.stdscr = dummystdscr()
    if testfn and os.path.exists(testfn):
        # use a context manager so the script file is closed even if a
        # keypress handler raises (the previous open/close pair leaked
        # the handle on error)
        with open(testfn, 'r') as testf:
            testcommands = [x.rstrip('\n') for x in testf]
        while True:
            if chunkselector.handlekeypressed(testcommands.pop(0), test=True):
                break
    return chunkselector.opts
621
621
622
622
# prompt shown at the top of the chunk-selection screen, keyed by the
# operation being performed: {operation: text}.  The None key covers the
# default (plain record/commit) case.
_headermessages = {
    b'apply': _(b'Select hunks to apply'),
    b'discard': _(b'Select hunks to discard'),
    b'keep': _(b'Select hunks to keep'),
    None: _(b'Select hunks to record'),
}
629
629
630
630
631 class curseschunkselector(object):
631 class curseschunkselector(object):
    def __init__(self, headerlist, ui, operation=None):
        """Initialize selector state for an interactive chunk session.

        headerlist: list of uiheader objects the user will choose from
        ui: Mercurial ui object, read for config and used for output
        operation: a key of _headermessages (b'apply', b'discard',
            b'keep' or None); any other value raises ProgrammingError
        """
        # put the headers into a patch object
        self.headerlist = patch(headerlist)

        self.ui = ui
        # options accumulated during the session, returned to the caller
        self.opts = {}

        self.errorstr = None
        # list of all chunks
        self.chunklist = []
        for h in headerlist:
            self.chunklist.append(h)
            self.chunklist.extend(h.hunks)

        # dictionary mapping (fgcolor, bgcolor) pairs to the
        # corresponding curses color-pair value.
        self.colorpairs = {}
        # maps custom nicknames of color-pairs to curses color-pair values
        self.colorpairnames = {}

        # Honor color setting of ui section. Keep colored setup as
        # long as not explicitly set to a falsy value - especially,
        # when not set at all. This is to stay most compatible with
        # previous (color only) behaviour.
        uicolor = stringutil.parsebool(self.ui.config(b'ui', b'color'))
        self.usecolor = uicolor is not False

        # the currently selected header, hunk, or hunk-line
        self.currentselecteditem = self.headerlist[0]
        # last item toggled with 'x'/' ' (anchor for range toggling)
        self.lastapplieditem = None

        # updated when printing out patch-display -- the 'lines' here are the
        # line positions *in the pad*, not on the screen.
        self.selecteditemstartline = 0
        self.selecteditemendline = None

        # define indentation levels
        self.headerindentnumchars = 0
        self.hunkindentnumchars = 3
        self.hunklineindentnumchars = 6

        # the first line of the pad to print to the screen
        self.firstlineofpadtoprint = 0

        # keeps track of the number of lines in the pad
        self.numpadlines = None

        self.numstatuslines = 1

        # keep a running count of the number of lines printed to the pad
        # (used for determining when the selected item begins/ends)
        self.linesprintedtopadsofar = 0

        # stores optional text for a commit comment provided by the user
        self.commenttext = b""

        # if the last 'toggle all' command caused all changes to be applied
        self.waslasttoggleallapplied = True

        # affects some ui text
        if operation not in _headermessages:
            raise error.ProgrammingError(
                b'unexpected operation: %s' % operation
            )
        self.operation = operation
697
697
698 def uparrowevent(self):
698 def uparrowevent(self):
699 """
699 """
700 try to select the previous item to the current item that has the
700 try to select the previous item to the current item that has the
701 most-indented level. for example, if a hunk is selected, try to select
701 most-indented level. for example, if a hunk is selected, try to select
702 the last hunkline of the hunk prior to the selected hunk. or, if
702 the last hunkline of the hunk prior to the selected hunk. or, if
703 the first hunkline of a hunk is currently selected, then select the
703 the first hunkline of a hunk is currently selected, then select the
704 hunk itself.
704 hunk itself.
705 """
705 """
706 currentitem = self.currentselecteditem
706 currentitem = self.currentselecteditem
707
707
708 nextitem = currentitem.previtem()
708 nextitem = currentitem.previtem()
709
709
710 if nextitem is None:
710 if nextitem is None:
711 # if no parent item (i.e. currentitem is the first header), then
711 # if no parent item (i.e. currentitem is the first header), then
712 # no change...
712 # no change...
713 nextitem = currentitem
713 nextitem = currentitem
714
714
715 self.currentselecteditem = nextitem
715 self.currentselecteditem = nextitem
716
716
717 def uparrowshiftevent(self):
717 def uparrowshiftevent(self):
718 """
718 """
719 select (if possible) the previous item on the same level as the
719 select (if possible) the previous item on the same level as the
720 currently selected item. otherwise, select (if possible) the
720 currently selected item. otherwise, select (if possible) the
721 parent-item of the currently selected item.
721 parent-item of the currently selected item.
722 """
722 """
723 currentitem = self.currentselecteditem
723 currentitem = self.currentselecteditem
724 nextitem = currentitem.prevsibling()
724 nextitem = currentitem.prevsibling()
725 # if there's no previous sibling, try choosing the parent
725 # if there's no previous sibling, try choosing the parent
726 if nextitem is None:
726 if nextitem is None:
727 nextitem = currentitem.parentitem()
727 nextitem = currentitem.parentitem()
728 if nextitem is None:
728 if nextitem is None:
729 # if no parent item (i.e. currentitem is the first header), then
729 # if no parent item (i.e. currentitem is the first header), then
730 # no change...
730 # no change...
731 nextitem = currentitem
731 nextitem = currentitem
732
732
733 self.currentselecteditem = nextitem
733 self.currentselecteditem = nextitem
734 self.recenterdisplayedarea()
734 self.recenterdisplayedarea()
735
735
736 def downarrowevent(self):
736 def downarrowevent(self):
737 """
737 """
738 try to select the next item to the current item that has the
738 try to select the next item to the current item that has the
739 most-indented level. for example, if a hunk is selected, select
739 most-indented level. for example, if a hunk is selected, select
740 the first hunkline of the selected hunk. or, if the last hunkline of
740 the first hunkline of the selected hunk. or, if the last hunkline of
741 a hunk is currently selected, then select the next hunk, if one exists,
741 a hunk is currently selected, then select the next hunk, if one exists,
742 or if not, the next header if one exists.
742 or if not, the next header if one exists.
743 """
743 """
744 # self.startprintline += 1 #debug
744 # self.startprintline += 1 #debug
745 currentitem = self.currentselecteditem
745 currentitem = self.currentselecteditem
746
746
747 nextitem = currentitem.nextitem()
747 nextitem = currentitem.nextitem()
748 # if there's no next item, keep the selection as-is
748 # if there's no next item, keep the selection as-is
749 if nextitem is None:
749 if nextitem is None:
750 nextitem = currentitem
750 nextitem = currentitem
751
751
752 self.currentselecteditem = nextitem
752 self.currentselecteditem = nextitem
753
753
754 def downarrowshiftevent(self):
754 def downarrowshiftevent(self):
755 """
755 """
756 select (if possible) the next item on the same level as the currently
756 select (if possible) the next item on the same level as the currently
757 selected item. otherwise, select (if possible) the next item on the
757 selected item. otherwise, select (if possible) the next item on the
758 same level as the parent item of the currently selected item.
758 same level as the parent item of the currently selected item.
759 """
759 """
760 currentitem = self.currentselecteditem
760 currentitem = self.currentselecteditem
761 nextitem = currentitem.nextsibling()
761 nextitem = currentitem.nextsibling()
762 # if there's no next sibling, try choosing the parent's nextsibling
762 # if there's no next sibling, try choosing the parent's nextsibling
763 if nextitem is None:
763 if nextitem is None:
764 try:
764 try:
765 nextitem = currentitem.parentitem().nextsibling()
765 nextitem = currentitem.parentitem().nextsibling()
766 except AttributeError:
766 except AttributeError:
767 # parentitem returned None, so nextsibling() can't be called
767 # parentitem returned None, so nextsibling() can't be called
768 nextitem = None
768 nextitem = None
769 if nextitem is None:
769 if nextitem is None:
770 # if parent has no next sibling, then no change...
770 # if parent has no next sibling, then no change...
771 nextitem = currentitem
771 nextitem = currentitem
772
772
773 self.currentselecteditem = nextitem
773 self.currentselecteditem = nextitem
774 self.recenterdisplayedarea()
774 self.recenterdisplayedarea()
775
775
776 def nextsametype(self, test=False):
776 def nextsametype(self, test=False):
777 currentitem = self.currentselecteditem
777 currentitem = self.currentselecteditem
778 sametype = lambda item: isinstance(item, type(currentitem))
778 sametype = lambda item: isinstance(item, type(currentitem))
779 nextitem = currentitem.nextitem()
779 nextitem = currentitem.nextitem()
780
780
781 while nextitem is not None and not sametype(nextitem):
781 while nextitem is not None and not sametype(nextitem):
782 nextitem = nextitem.nextitem()
782 nextitem = nextitem.nextitem()
783
783
784 if nextitem is None:
784 if nextitem is None:
785 nextitem = currentitem
785 nextitem = currentitem
786 else:
786 else:
787 parent = nextitem.parentitem()
787 parent = nextitem.parentitem()
788 if parent is not None and parent.folded:
788 if parent is not None and parent.folded:
789 self.togglefolded(parent)
789 self.togglefolded(parent)
790
790
791 self.currentselecteditem = nextitem
791 self.currentselecteditem = nextitem
792 if not test:
792 if not test:
793 self.recenterdisplayedarea()
793 self.recenterdisplayedarea()
794
794
795 def rightarrowevent(self):
795 def rightarrowevent(self):
796 """
796 """
797 select (if possible) the first of this item's child-items.
797 select (if possible) the first of this item's child-items.
798 """
798 """
799 currentitem = self.currentselecteditem
799 currentitem = self.currentselecteditem
800 nextitem = currentitem.firstchild()
800 nextitem = currentitem.firstchild()
801
801
802 # turn off folding if we want to show a child-item
802 # turn off folding if we want to show a child-item
803 if currentitem.folded:
803 if currentitem.folded:
804 self.togglefolded(currentitem)
804 self.togglefolded(currentitem)
805
805
806 if nextitem is None:
806 if nextitem is None:
807 # if no next item on parent-level, then no change...
807 # if no next item on parent-level, then no change...
808 nextitem = currentitem
808 nextitem = currentitem
809
809
810 self.currentselecteditem = nextitem
810 self.currentselecteditem = nextitem
811
811
812 def leftarrowevent(self):
812 def leftarrowevent(self):
813 """
813 """
814 if the current item can be folded (i.e. it is an unfolded header or
814 if the current item can be folded (i.e. it is an unfolded header or
815 hunk), then fold it. otherwise try select (if possible) the parent
815 hunk), then fold it. otherwise try select (if possible) the parent
816 of this item.
816 of this item.
817 """
817 """
818 currentitem = self.currentselecteditem
818 currentitem = self.currentselecteditem
819
819
820 # try to fold the item
820 # try to fold the item
821 if not isinstance(currentitem, uihunkline):
821 if not isinstance(currentitem, uihunkline):
822 if not currentitem.folded:
822 if not currentitem.folded:
823 self.togglefolded(item=currentitem)
823 self.togglefolded(item=currentitem)
824 return
824 return
825
825
826 # if it can't be folded, try to select the parent item
826 # if it can't be folded, try to select the parent item
827 nextitem = currentitem.parentitem()
827 nextitem = currentitem.parentitem()
828
828
829 if nextitem is None:
829 if nextitem is None:
830 # if no item on parent-level, then no change...
830 # if no item on parent-level, then no change...
831 nextitem = currentitem
831 nextitem = currentitem
832 if not nextitem.folded:
832 if not nextitem.folded:
833 self.togglefolded(item=nextitem)
833 self.togglefolded(item=nextitem)
834
834
835 self.currentselecteditem = nextitem
835 self.currentselecteditem = nextitem
836
836
837 def leftarrowshiftevent(self):
837 def leftarrowshiftevent(self):
838 """
838 """
839 select the header of the current item (or fold current item if the
839 select the header of the current item (or fold current item if the
840 current item is already a header).
840 current item is already a header).
841 """
841 """
842 currentitem = self.currentselecteditem
842 currentitem = self.currentselecteditem
843
843
844 if isinstance(currentitem, uiheader):
844 if isinstance(currentitem, uiheader):
845 if not currentitem.folded:
845 if not currentitem.folded:
846 self.togglefolded(item=currentitem)
846 self.togglefolded(item=currentitem)
847 return
847 return
848
848
849 # select the parent item recursively until we're at a header
849 # select the parent item recursively until we're at a header
850 while True:
850 while True:
851 nextitem = currentitem.parentitem()
851 nextitem = currentitem.parentitem()
852 if nextitem is None:
852 if nextitem is None:
853 break
853 break
854 else:
854 else:
855 currentitem = nextitem
855 currentitem = nextitem
856
856
857 self.currentselecteditem = currentitem
857 self.currentselecteditem = currentitem
858
858
859 def updatescroll(self):
859 def updatescroll(self):
860 """scroll the screen to fully show the currently-selected"""
860 """scroll the screen to fully show the currently-selected"""
861 selstart = self.selecteditemstartline
861 selstart = self.selecteditemstartline
862 selend = self.selecteditemendline
862 selend = self.selecteditemendline
863
863
864 padstart = self.firstlineofpadtoprint
864 padstart = self.firstlineofpadtoprint
865 padend = padstart + self.yscreensize - self.numstatuslines - 1
865 padend = padstart + self.yscreensize - self.numstatuslines - 1
866 # 'buffered' pad start/end values which scroll with a certain
866 # 'buffered' pad start/end values which scroll with a certain
867 # top/bottom context margin
867 # top/bottom context margin
868 padstartbuffered = padstart + 3
868 padstartbuffered = padstart + 3
869 padendbuffered = padend - 3
869 padendbuffered = padend - 3
870
870
871 if selend > padendbuffered:
871 if selend > padendbuffered:
872 self.scrolllines(selend - padendbuffered)
872 self.scrolllines(selend - padendbuffered)
873 elif selstart < padstartbuffered:
873 elif selstart < padstartbuffered:
874 # negative values scroll in pgup direction
874 # negative values scroll in pgup direction
875 self.scrolllines(selstart - padstartbuffered)
875 self.scrolllines(selstart - padstartbuffered)
876
876
877 def scrolllines(self, numlines):
877 def scrolllines(self, numlines):
878 """scroll the screen up (down) by numlines when numlines >0 (<0)."""
878 """scroll the screen up (down) by numlines when numlines >0 (<0)."""
879 self.firstlineofpadtoprint += numlines
879 self.firstlineofpadtoprint += numlines
880 if self.firstlineofpadtoprint < 0:
880 if self.firstlineofpadtoprint < 0:
881 self.firstlineofpadtoprint = 0
881 self.firstlineofpadtoprint = 0
882 if self.firstlineofpadtoprint > self.numpadlines - 1:
882 if self.firstlineofpadtoprint > self.numpadlines - 1:
883 self.firstlineofpadtoprint = self.numpadlines - 1
883 self.firstlineofpadtoprint = self.numpadlines - 1
884
884
    def toggleapply(self, item=None):
        """
        toggle the applied flag of the specified item. if no item is specified,
        toggle the flag of the currently selected item.

        Toggling propagates in both directions: down to all children of the
        item, and up to the parents, whose applied/partial flags are
        recomputed from the new state of their children.
        """
        if item is None:
            item = self.currentselecteditem
            # Only set this when NOT using 'toggleall'
            self.lastapplieditem = item

        item.applied = not item.applied

        if isinstance(item, uiheader):
            item.partial = False
            if item.applied:
                # apply all its hunks
                for hnk in item.hunks:
                    hnk.applied = True
                    # apply all their hunklines
                    for hunkline in hnk.changedlines:
                        hunkline.applied = True
            else:
                # un-apply all its hunks
                for hnk in item.hunks:
                    hnk.applied = False
                    hnk.partial = False
                    # un-apply all their hunklines
                    for hunkline in hnk.changedlines:
                        hunkline.applied = False
        elif isinstance(item, uihunk):
            item.partial = False
            # apply all it's hunklines
            for hunkline in item.changedlines:
                hunkline.applied = item.applied

            # recompute the header's flags from its hunks' new state
            siblingappliedstatus = [hnk.applied for hnk in item.header.hunks]
            allsiblingsapplied = not (False in siblingappliedstatus)
            nosiblingsapplied = not (True in siblingappliedstatus)

            siblingspartialstatus = [hnk.partial for hnk in item.header.hunks]
            somesiblingspartial = True in siblingspartialstatus

            # cases where applied or partial should be removed from header

            # if no 'sibling' hunks are applied (including this hunk)
            if nosiblingsapplied:
                if not item.header.special():
                    item.header.applied = False
                    item.header.partial = False
            else:  # some/all parent siblings are applied
                item.header.applied = True
                item.header.partial = (
                    somesiblingspartial or not allsiblingsapplied
                )

        elif isinstance(item, uihunkline):
            # recompute the hunk's flags from its lines' new state
            siblingappliedstatus = [ln.applied for ln in item.hunk.changedlines]
            allsiblingsapplied = not (False in siblingappliedstatus)
            nosiblingsapplied = not (True in siblingappliedstatus)

            # if no 'sibling' lines are applied
            if nosiblingsapplied:
                item.hunk.applied = False
                item.hunk.partial = False
            elif allsiblingsapplied:
                item.hunk.applied = True
                item.hunk.partial = False
            else:  # some siblings applied
                item.hunk.applied = True
                item.hunk.partial = True

            # and the header's flags from its hunks' new state
            parentsiblingsapplied = [
                hnk.applied for hnk in item.hunk.header.hunks
            ]
            noparentsiblingsapplied = not (True in parentsiblingsapplied)
            allparentsiblingsapplied = not (False in parentsiblingsapplied)

            parentsiblingspartial = [
                hnk.partial for hnk in item.hunk.header.hunks
            ]
            someparentsiblingspartial = True in parentsiblingspartial

            # if all parent hunks are not applied, un-apply header
            if noparentsiblingsapplied:
                if not item.hunk.header.special():
                    item.hunk.header.applied = False
                    item.hunk.header.partial = False
            # set the applied and partial status of the header if needed
            else:  # some/all parent siblings are applied
                item.hunk.header.applied = True
                item.hunk.header.partial = (
                    someparentsiblingspartial or not allparentsiblingsapplied
                )
978
978
979 def toggleall(self):
979 def toggleall(self):
980 """toggle the applied flag of all items."""
980 """toggle the applied flag of all items."""
981 if self.waslasttoggleallapplied: # then unapply them this time
981 if self.waslasttoggleallapplied: # then unapply them this time
982 for item in self.headerlist:
982 for item in self.headerlist:
983 if item.applied:
983 if item.applied:
984 self.toggleapply(item)
984 self.toggleapply(item)
985 else:
985 else:
986 for item in self.headerlist:
986 for item in self.headerlist:
987 if not item.applied:
987 if not item.applied:
988 self.toggleapply(item)
988 self.toggleapply(item)
989 self.waslasttoggleallapplied = not self.waslasttoggleallapplied
989 self.waslasttoggleallapplied = not self.waslasttoggleallapplied
990
990
991 def flipselections(self):
991 def flipselections(self):
992 """
992 """
993 Flip all selections. Every selected line is unselected and vice
993 Flip all selections. Every selected line is unselected and vice
994 versa.
994 versa.
995 """
995 """
996 for header in self.headerlist:
996 for header in self.headerlist:
997 for hunk in header.allchildren():
997 for hunk in header.allchildren():
998 for line in hunk.allchildren():
998 for line in hunk.allchildren():
999 self.toggleapply(line)
999 self.toggleapply(line)
1000
1000
    def toggleallbetween(self):
        """toggle applied on or off for all items in range [lastapplied,
        current]. """
        if (
            not self.lastapplieditem
            or self.currentselecteditem == self.lastapplieditem
        ):
            # Treat this like a normal 'x'/' '
            self.toggleapply()
            return

        startitem = self.lastapplieditem
        enditem = self.currentselecteditem
        # Verify that enditem is "after" startitem, otherwise swap them.
        # We probe forward from startitem first; if we never reach enditem,
        # the range must run the other way, so swap and probe again.
        for direction in [b'forward', b'reverse']:
            nextitem = startitem.nextitem()
            while nextitem and nextitem != enditem:
                nextitem = nextitem.nextitem()
            if nextitem:
                break
            # Looks like we went the wrong direction :)
            startitem, enditem = enditem, startitem

        if not nextitem:
            # We didn't find a path going either forward or backward? Don't know
            # how this can happen, let's not crash though.
            return

        nextitem = startitem
        # Switch all items to be the opposite state of the currently selected
        # item. Specifically:
        # [ ] startitem
        # [x] middleitem
        # [ ] enditem <-- currently selected
        # This will turn all three on, since the currently selected item is off.
        # This does *not* invert each item (i.e. middleitem stays marked/on)
        desiredstate = not self.currentselecteditem.applied
        while nextitem != enditem.nextitem():
            if nextitem.applied != desiredstate:
                self.toggleapply(item=nextitem)
            nextitem = nextitem.nextitem()
1042
1042
    def togglefolded(self, item=None, foldparent=False):
        """toggle folded flag of specified item (defaults to currently
        selected)

        if foldparent is True (or the item is a never-unfolded header),
        the toggle is applied to the item's parent header instead, and the
        selection moves to that parent.
        """
        if item is None:
            item = self.currentselecteditem
        if foldparent or (isinstance(item, uiheader) and item.neverunfolded):
            if not isinstance(item, uiheader):
                # we need to select the parent item in this case
                self.currentselecteditem = item = item.parentitem()
            elif item.neverunfolded:
                # first explicit unfold of this header: clear the flag so
                # subsequent toggles behave normally
                item.neverunfolded = False

            # also fold any foldable children of the parent/current item
            if isinstance(item, uiheader):  # the original or 'new' item
                for child in item.allchildren():
                    child.folded = not item.folded

        # only headers and hunks are foldable; hunk lines are not
        if isinstance(item, (uiheader, uihunk)):
            item.folded = not item.folded
1062
1062
1063 def alignstring(self, instr, window):
1063 def alignstring(self, instr, window):
1064 """
1064 """
1065 add whitespace to the end of a string in order to make it fill
1065 add whitespace to the end of a string in order to make it fill
1066 the screen in the x direction. the current cursor position is
1066 the screen in the x direction. the current cursor position is
1067 taken into account when making this calculation. the string can span
1067 taken into account when making this calculation. the string can span
1068 multiple lines.
1068 multiple lines.
1069 """
1069 """
1070 y, xstart = window.getyx()
1070 y, xstart = window.getyx()
1071 width = self.xscreensize
1071 width = self.xscreensize
1072 # turn tabs into spaces
1072 # turn tabs into spaces
1073 instr = instr.expandtabs(4)
1073 instr = instr.expandtabs(4)
1074 strwidth = encoding.colwidth(instr)
1074 strwidth = encoding.colwidth(instr)
1075 numspaces = width - ((strwidth + xstart) % width)
1075 numspaces = width - ((strwidth + xstart) % width)
1076 return instr + b" " * numspaces
1076 return instr + b" " * numspaces
1077
1077
    def printstring(
        self,
        window,
        text,
        fgcolor=None,
        bgcolor=None,
        pair=None,
        pairname=None,
        attrlist=None,
        towin=True,
        align=True,
        showwhtspc=False,
    ):
        """
        print the string, text, with the specified colors and attributes, to
        the specified curses window object.

        the foreground and background colors are of the form
        curses.color_xxxx, where xxxx is one of: [black, blue, cyan, green,
        magenta, red, white, yellow]. if pairname is provided, a color
        pair will be looked up in the self.colorpairnames dictionary.

        attrlist is a list containing text attributes in the form of
        curses.a_xxxx, where xxxx can be: [bold, dim, normal, standout,
        underline].

        if align == True, whitespace is added to the printed string such that
        the string stretches to the right border of the window.

        if showwhtspc == True, trailing whitespace of a string is highlighted.

        returns the text that was (or would have been, when towin is False)
        printed, including any alignment padding; callers use it to count
        printed lines.
        """
        # preprocess the text, converting tabs to spaces
        text = text.expandtabs(4)
        # strip \n, and convert control characters to ^[char] representation
        # (e.g. 0x01 -> "^A"); tab (0x09) is excluded as it was expanded above
        text = re.sub(
            br'[\x00-\x08\x0a-\x1f]',
            lambda m: b'^' + pycompat.sysbytes(chr(ord(m.group()) + 64)),
            text.strip(b'\n'),
        )

        # resolve the color pair: explicit pair > named pair > (fg, bg) lookup
        if pair is not None:
            colorpair = pair
        elif pairname is not None:
            colorpair = self.colorpairnames[pairname]
        else:
            if fgcolor is None:
                fgcolor = -1
            if bgcolor is None:
                bgcolor = -1
            if (fgcolor, bgcolor) in self.colorpairs:
                colorpair = self.colorpairs[(fgcolor, bgcolor)]
            else:
                colorpair = self.getcolorpair(fgcolor, bgcolor)
        # add attributes if possible
        if attrlist is None:
            attrlist = []
        if colorpair < 256:
            # then it is safe to apply all attributes
            for textattr in attrlist:
                colorpair |= textattr
        else:
            # just apply a select few (safe?) attributes
            for textattr in (curses.A_UNDERLINE, curses.A_BOLD):
                if textattr in attrlist:
                    colorpair |= textattr

        # NOTE(review): cursor position is read from chunkpad, not from the
        # passed-in window — confirm this is intentional for status-window use
        y, xstart = self.chunkpad.getyx()
        t = b""  # variable for counting lines printed
        # if requested, show trailing whitespace
        if showwhtspc:
            origlen = len(text)
            text = text.rstrip(b' \n')  # tabs have already been expanded
            strippedlen = len(text)
            numtrailingspaces = origlen - strippedlen

        if towin:
            window.addstr(text, colorpair)
        t += text

        if showwhtspc:
            # highlight stripped trailing whitespace in reverse video
            wscolorpair = colorpair | curses.A_REVERSE
            if towin:
                for i in range(numtrailingspaces):
                    window.addch(curses.ACS_CKBOARD, wscolorpair)
            t += b" " * numtrailingspaces

        if align:
            if towin:
                extrawhitespace = self.alignstring(b"", window)
                window.addstr(extrawhitespace, colorpair)
            else:
                # need to use t, since the x position hasn't incremented
                extrawhitespace = self.alignstring(t, window)
                t += extrawhitespace

        # is reset to 0 at the beginning of printitem()

        linesprinted = (xstart + len(t)) // self.xscreensize
        self.linesprintedtopadsofar += linesprinted
        return t
1178
1178
1179 def _getstatuslinesegments(self):
1179 def _getstatuslinesegments(self):
1180 """-> [str]. return segments"""
1180 """-> [str]. return segments"""
1181 selected = self.currentselecteditem.applied
1181 selected = self.currentselecteditem.applied
1182 spaceselect = _(b'space/enter: select')
1182 spaceselect = _(b'space/enter: select')
1183 spacedeselect = _(b'space/enter: deselect')
1183 spacedeselect = _(b'space/enter: deselect')
1184 # Format the selected label into a place as long as the longer of the
1184 # Format the selected label into a place as long as the longer of the
1185 # two possible labels. This may vary by language.
1185 # two possible labels. This may vary by language.
1186 spacelen = max(len(spaceselect), len(spacedeselect))
1186 spacelen = max(len(spaceselect), len(spacedeselect))
1187 selectedlabel = b'%-*s' % (
1187 selectedlabel = b'%-*s' % (
1188 spacelen,
1188 spacelen,
1189 spacedeselect if selected else spaceselect,
1189 spacedeselect if selected else spaceselect,
1190 )
1190 )
1191 segments = [
1191 segments = [
1192 _headermessages[self.operation],
1192 _headermessages[self.operation],
1193 b'-',
1193 b'-',
1194 _(b'[x]=selected **=collapsed'),
1194 _(b'[x]=selected **=collapsed'),
1195 _(b'c: confirm'),
1195 _(b'c: confirm'),
1196 _(b'q: abort'),
1196 _(b'q: abort'),
1197 _(b'arrow keys: move/expand/collapse'),
1197 _(b'arrow keys: move/expand/collapse'),
1198 selectedlabel,
1198 selectedlabel,
1199 _(b'?: help'),
1199 _(b'?: help'),
1200 ]
1200 ]
1201 return segments
1201 return segments
1202
1202
    def _getstatuslines(self):
        """() -> [str]. return short help used in the top status window

        either the pending error message plus a continue prompt, or the
        help segments word-wrapped to the screen width.  as a side effect,
        resizes the status window when the number of lines changes.
        """
        if self.errorstr is not None:
            lines = [self.errorstr, _(b'Press any key to continue')]
        else:
            # wrap segments to lines
            segments = self._getstatuslinesegments()
            width = self.xscreensize
            lines = []
            lastwidth = width
            for s in segments:
                w = encoding.colwidth(s)
                # two-space separator, except before segments starting with
                # '-' or '[' which get a single space
                sep = b' ' * (1 + (s and s[0] not in b'-['))
                if lastwidth + w + len(sep) >= width:
                    # segment does not fit on the current line: start a new one
                    lines.append(s)
                    lastwidth = w
                else:
                    lines[-1] += sep + s
                    lastwidth += w + len(sep)
        if len(lines) != self.numstatuslines:
            self.numstatuslines = len(lines)
            self.statuswin.resize(self.numstatuslines, self.xscreensize)
        # truncate each line so it fits the screen
        return [stringutil.ellipsis(l, self.xscreensize - 1) for l in lines]
1226
1226
    def updatescreen(self):
        """redraw the whole UI: status lines at the top, then the patch
        pad below (unless an error message is pending)."""
        self.statuswin.erase()
        self.chunkpad.erase()

        printstring = self.printstring

        # print out the status lines at the top
        try:
            for line in self._getstatuslines():
                printstring(self.statuswin, line, pairname=b"legend")
            self.statuswin.refresh()
        except curses.error:
            # best-effort: swallow curses drawing errors
            pass
        if self.errorstr is not None:
            # leave only the error message visible until a key is pressed
            return

        # print out the patch in the remaining part of the window
        try:
            self.printitem()
            self.updatescroll()
            self.chunkpad.refresh(
                self.firstlineofpadtoprint,
                0,
                self.numstatuslines,
                0,
                self.yscreensize - self.numstatuslines,
                self.xscreensize,
            )
        except curses.error:
            # best-effort: swallow curses drawing errors
            pass
1257
1257
    def getstatusprefixstring(self, item):
        """
        create a string to prefix a line with which indicates whether 'item'
        is applied and/or folded.
        """

        # create checkbox string: [x] applied, [~] partially applied,
        # [ ] not applied
        if item.applied:
            if not isinstance(item, uihunkline) and item.partial:
                checkbox = b"[~]"
            else:
                checkbox = b"[x]"
        else:
            checkbox = b"[ ]"

        try:
            if item.folded:
                # '**' marks a collapsed item
                checkbox += b"**"
                if isinstance(item, uiheader):
                    # one of "m", "a", or "d" (modified, added, deleted)
                    filestatus = item.changetype

                    checkbox += filestatus + b" "
            else:
                checkbox += b"  "
                if isinstance(item, uiheader):
                    # add two more spaces for headers
                    checkbox += b"  "
        except AttributeError:  # not foldable
            checkbox += b"  "

        return checkbox
1290
1290
    def printheader(
        self, header, selected=False, towin=True, ignorefolding=False
    ):
        """
        print the header to the pad. if towin is False, don't print
        anything, but just count the number of lines which would be printed
        (the rendered text is returned either way).
        """

        outstr = b""
        text = header.prettystr()
        chunkindex = self.chunklist.index(header)

        if chunkindex != 0 and not header.folded:
            # add separating line before headers
            outstr += self.printstring(
                self.chunkpad, b'_' * self.xscreensize, towin=towin, align=False
            )
        # select color-pair based on if the header is selected
        colorpair = self.getcolorpair(
            name=selected and b"selected" or b"normal", attrlist=[curses.A_BOLD]
        )

        # print out each line of the chunk, expanding it to screen width

        # number of characters to indent lines on this level by
        indentnumchars = 0
        checkbox = self.getstatusprefixstring(header)
        if not header.folded or ignorefolding:
            # unfolded: show the full diff header text
            textlist = text.split(b"\n")
            linestr = checkbox + textlist[0]
        else:
            # folded: show only the filename
            linestr = checkbox + header.filename()
        outstr += self.printstring(
            self.chunkpad, linestr, pair=colorpair, towin=towin
        )
        if not header.folded or ignorefolding:
            if len(textlist) > 1:
                # remaining header lines, indented past the checkbox
                for line in textlist[1:]:
                    linestr = b" " * (indentnumchars + len(checkbox)) + line
                    outstr += self.printstring(
                        self.chunkpad, linestr, pair=colorpair, towin=towin
                    )

        return outstr
1335
1335
    def printhunklinesbefore(
        self, hunk, selected=False, towin=True, ignorefolding=False
    ):
        """print the hunk's from-to line (with checkbox) and its leading
        context lines; includes start/end line indicator.  returns the
        rendered text."""
        outstr = b""
        # where hunk is in list of siblings
        hunkindex = hunk.header.hunks.index(hunk)

        if hunkindex != 0:
            # add separating line before headers
            outstr += self.printstring(
                self.chunkpad, b' ' * self.xscreensize, towin=towin, align=False
            )

        colorpair = self.getcolorpair(
            name=selected and b"selected" or b"normal", attrlist=[curses.A_BOLD]
        )

        # print out from-to line with checkbox
        checkbox = self.getstatusprefixstring(hunk)

        lineprefix = b" " * self.hunkindentnumchars + checkbox
        frtoline = b" " + hunk.getfromtoline().strip(b"\n")

        outstr += self.printstring(
            self.chunkpad, lineprefix, towin=towin, align=False
        )  # add uncolored checkbox/indent
        outstr += self.printstring(
            self.chunkpad, frtoline, pair=colorpair, towin=towin
        )

        if hunk.folded and not ignorefolding:
            # skip remainder of output
            return outstr

        # print out lines of the chunk preceeding changed-lines
        for line in hunk.before:
            linestr = (
                b" " * (self.hunklineindentnumchars + len(checkbox)) + line
            )
            outstr += self.printstring(self.chunkpad, linestr, towin=towin)

        return outstr
1379
1379
1380 def printhunklinesafter(self, hunk, towin=True, ignorefolding=False):
1380 def printhunklinesafter(self, hunk, towin=True, ignorefolding=False):
1381 outstr = b""
1381 outstr = b""
1382 if hunk.folded and not ignorefolding:
1382 if hunk.folded and not ignorefolding:
1383 return outstr
1383 return outstr
1384
1384
1385 # a bit superfluous, but to avoid hard-coding indent amount
1385 # a bit superfluous, but to avoid hard-coding indent amount
1386 checkbox = self.getstatusprefixstring(hunk)
1386 checkbox = self.getstatusprefixstring(hunk)
1387 for line in hunk.after:
1387 for line in hunk.after:
1388 linestr = (
1388 linestr = (
1389 b" " * (self.hunklineindentnumchars + len(checkbox)) + line
1389 b" " * (self.hunklineindentnumchars + len(checkbox)) + line
1390 )
1390 )
1391 outstr += self.printstring(self.chunkpad, linestr, towin=towin)
1391 outstr += self.printstring(self.chunkpad, linestr, towin=towin)
1392
1392
1393 return outstr
1393 return outstr
1394
1394
1395 def printhunkchangedline(self, hunkline, selected=False, towin=True):
1395 def printhunkchangedline(self, hunkline, selected=False, towin=True):
1396 outstr = b""
1396 outstr = b""
1397 checkbox = self.getstatusprefixstring(hunkline)
1397 checkbox = self.getstatusprefixstring(hunkline)
1398
1398
1399 linestr = hunkline.prettystr().strip(b"\n")
1399 linestr = hunkline.prettystr().strip(b"\n")
1400
1400
1401 # select color-pair based on whether line is an addition/removal
1401 # select color-pair based on whether line is an addition/removal
1402 if selected:
1402 if selected:
1403 colorpair = self.getcolorpair(name=b"selected")
1403 colorpair = self.getcolorpair(name=b"selected")
1404 elif linestr.startswith(b"+"):
1404 elif linestr.startswith(b"+"):
1405 colorpair = self.getcolorpair(name=b"addition")
1405 colorpair = self.getcolorpair(name=b"addition")
1406 elif linestr.startswith(b"-"):
1406 elif linestr.startswith(b"-"):
1407 colorpair = self.getcolorpair(name=b"deletion")
1407 colorpair = self.getcolorpair(name=b"deletion")
1408 elif linestr.startswith(b"\\"):
1408 elif linestr.startswith(b"\\"):
1409 colorpair = self.getcolorpair(name=b"normal")
1409 colorpair = self.getcolorpair(name=b"normal")
1410
1410
1411 lineprefix = b" " * self.hunklineindentnumchars + checkbox
1411 lineprefix = b" " * self.hunklineindentnumchars + checkbox
1412 outstr += self.printstring(
1412 outstr += self.printstring(
1413 self.chunkpad, lineprefix, towin=towin, align=False
1413 self.chunkpad, lineprefix, towin=towin, align=False
1414 ) # add uncolored checkbox/indent
1414 ) # add uncolored checkbox/indent
1415 outstr += self.printstring(
1415 outstr += self.printstring(
1416 self.chunkpad, linestr, pair=colorpair, towin=towin, showwhtspc=True
1416 self.chunkpad, linestr, pair=colorpair, towin=towin, showwhtspc=True
1417 )
1417 )
1418 return outstr
1418 return outstr
1419
1419
1420 def printitem(
1420 def printitem(
1421 self, item=None, ignorefolding=False, recursechildren=True, towin=True
1421 self, item=None, ignorefolding=False, recursechildren=True, towin=True
1422 ):
1422 ):
1423 """
1423 """
1424 use __printitem() to print the the specified item.applied.
1424 use __printitem() to print the the specified item.applied.
1425 if item is not specified, then print the entire patch.
1425 if item is not specified, then print the entire patch.
1426 (hiding folded elements, etc. -- see __printitem() docstring)
1426 (hiding folded elements, etc. -- see __printitem() docstring)
1427 """
1427 """
1428
1428
1429 if item is None:
1429 if item is None:
1430 item = self.headerlist
1430 item = self.headerlist
1431 if recursechildren:
1431 if recursechildren:
1432 self.linesprintedtopadsofar = 0
1432 self.linesprintedtopadsofar = 0
1433
1433
1434 outstr = []
1434 outstr = []
1435 self.__printitem(
1435 self.__printitem(
1436 item, ignorefolding, recursechildren, outstr, towin=towin
1436 item, ignorefolding, recursechildren, outstr, towin=towin
1437 )
1437 )
1438 return b''.join(outstr)
1438 return b''.join(outstr)
1439
1439
1440 def outofdisplayedarea(self):
1440 def outofdisplayedarea(self):
1441 y, _ = self.chunkpad.getyx() # cursor location
1441 y, _ = self.chunkpad.getyx() # cursor location
1442 # * 2 here works but an optimization would be the max number of
1442 # * 2 here works but an optimization would be the max number of
1443 # consecutive non selectable lines
1443 # consecutive non selectable lines
1444 # i.e the max number of context line for any hunk in the patch
1444 # i.e the max number of context line for any hunk in the patch
1445 miny = min(0, self.firstlineofpadtoprint - self.yscreensize)
1445 miny = min(0, self.firstlineofpadtoprint - self.yscreensize)
1446 maxy = self.firstlineofpadtoprint + self.yscreensize * 2
1446 maxy = self.firstlineofpadtoprint + self.yscreensize * 2
1447 return y < miny or y > maxy
1447 return y < miny or y > maxy
1448
1448
1449 def handleselection(self, item, recursechildren):
1449 def handleselection(self, item, recursechildren):
1450 selected = item is self.currentselecteditem
1450 selected = item is self.currentselecteditem
1451 if selected and recursechildren:
1451 if selected and recursechildren:
1452 # assumes line numbering starting from line 0
1452 # assumes line numbering starting from line 0
1453 self.selecteditemstartline = self.linesprintedtopadsofar
1453 self.selecteditemstartline = self.linesprintedtopadsofar
1454 selecteditemlines = self.getnumlinesdisplayed(
1454 selecteditemlines = self.getnumlinesdisplayed(
1455 item, recursechildren=False
1455 item, recursechildren=False
1456 )
1456 )
1457 self.selecteditemendline = (
1457 self.selecteditemendline = (
1458 self.selecteditemstartline + selecteditemlines - 1
1458 self.selecteditemstartline + selecteditemlines - 1
1459 )
1459 )
1460 return selected
1460 return selected
1461
1461
    def __printitem(
        self, item, ignorefolding, recursechildren, outstr, towin=True
    ):
        """
        recursive method for printing out patch/header/hunk/hunk-line data to
        screen. also returns a string with all of the content of the displayed
        patch (not including coloring, etc.).

        the rendered fragments are appended to *outstr* (mutated in place)
        and the same list is returned.

        if ignorefolding is True, then folded items are printed out.

        if recursechildren is False, then only print the item without its
        child items.
        """

        if towin and self.outofdisplayedarea():
            # off-screen: skip rendering entirely
            return

        selected = self.handleselection(item, recursechildren)

        # patch object is a list of headers
        if isinstance(item, patch):
            if recursechildren:
                for hdr in item:
                    self.__printitem(
                        hdr, ignorefolding, recursechildren, outstr, towin
                    )
        # todo: eliminate all isinstance() calls
        if isinstance(item, uiheader):
            outstr.append(
                self.printheader(
                    item, selected, towin=towin, ignorefolding=ignorefolding
                )
            )
            if recursechildren:
                for hnk in item.hunks:
                    self.__printitem(
                        hnk, ignorefolding, recursechildren, outstr, towin
                    )
        elif isinstance(item, uihunk) and (
            (not item.header.folded) or ignorefolding
        ):
            # print the hunk data which comes before the changed-lines
            outstr.append(
                self.printhunklinesbefore(
                    item, selected, towin=towin, ignorefolding=ignorefolding
                )
            )
            if recursechildren:
                for l in item.changedlines:
                    self.__printitem(
                        l, ignorefolding, recursechildren, outstr, towin
                    )
            # trailing context lines of the hunk
            outstr.append(
                self.printhunklinesafter(
                    item, towin=towin, ignorefolding=ignorefolding
                )
            )
        elif isinstance(item, uihunkline) and (
            (not item.hunk.folded) or ignorefolding
        ):
            outstr.append(
                self.printhunkchangedline(item, selected, towin=towin)
            )

        return outstr
1527
1527
1528 def getnumlinesdisplayed(
1528 def getnumlinesdisplayed(
1529 self, item=None, ignorefolding=False, recursechildren=True
1529 self, item=None, ignorefolding=False, recursechildren=True
1530 ):
1530 ):
1531 """
1531 """
1532 return the number of lines which would be displayed if the item were
1532 return the number of lines which would be displayed if the item were
1533 to be printed to the display. the item will not be printed to the
1533 to be printed to the display. the item will not be printed to the
1534 display (pad).
1534 display (pad).
1535 if no item is given, assume the entire patch.
1535 if no item is given, assume the entire patch.
1536 if ignorefolding is True, folded items will be unfolded when counting
1536 if ignorefolding is True, folded items will be unfolded when counting
1537 the number of lines.
1537 the number of lines.
1538 """
1538 """
1539
1539
1540 # temporarily disable printing to windows by printstring
1540 # temporarily disable printing to windows by printstring
1541 patchdisplaystring = self.printitem(
1541 patchdisplaystring = self.printitem(
1542 item, ignorefolding, recursechildren, towin=False
1542 item, ignorefolding, recursechildren, towin=False
1543 )
1543 )
1544 numlines = len(patchdisplaystring) // self.xscreensize
1544 numlines = len(patchdisplaystring) // self.xscreensize
1545 return numlines
1545 return numlines
1546
1546
    def sigwinchhandler(self, n, frame):
        """handle window resizing

        standard signal-handler signature (signum, frame); both arguments
        are unused.  re-queries the terminal size and rebuilds the status
        window and chunk pad to match the new geometry.
        """
        try:
            curses.endwin()
            self.xscreensize, self.yscreensize = scmutil.termsize(self.ui)
            self.statuswin.resize(self.numstatuslines, self.xscreensize)
            # +1 so the pad always has room for the cursor past the last line
            self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
            self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
        except curses.error:
            # best-effort: ignore curses errors and keep the old geometry
            pass
1557
1557
1558 def getcolorpair(
1558 def getcolorpair(
1559 self, fgcolor=None, bgcolor=None, name=None, attrlist=None
1559 self, fgcolor=None, bgcolor=None, name=None, attrlist=None
1560 ):
1560 ):
1561 """
1561 """
1562 get a curses color pair, adding it to self.colorpairs if it is not
1562 get a curses color pair, adding it to self.colorpairs if it is not
1563 already defined. an optional string, name, can be passed as a shortcut
1563 already defined. an optional string, name, can be passed as a shortcut
1564 for referring to the color-pair. by default, if no arguments are
1564 for referring to the color-pair. by default, if no arguments are
1565 specified, the white foreground / black background color-pair is
1565 specified, the white foreground / black background color-pair is
1566 returned.
1566 returned.
1567
1567
1568 it is expected that this function will be used exclusively for
1568 it is expected that this function will be used exclusively for
1569 initializing color pairs, and not curses.init_pair().
1569 initializing color pairs, and not curses.init_pair().
1570
1570
1571 attrlist is used to 'flavor' the returned color-pair. this information
1571 attrlist is used to 'flavor' the returned color-pair. this information
1572 is not stored in self.colorpairs. it contains attribute values like
1572 is not stored in self.colorpairs. it contains attribute values like
1573 curses.A_BOLD.
1573 curses.A_BOLD.
1574 """
1574 """
1575
1575
1576 if (name is not None) and name in self.colorpairnames:
1576 if (name is not None) and name in self.colorpairnames:
1577 # then get the associated color pair and return it
1577 # then get the associated color pair and return it
1578 colorpair = self.colorpairnames[name]
1578 colorpair = self.colorpairnames[name]
1579 else:
1579 else:
1580 if fgcolor is None:
1580 if fgcolor is None:
1581 fgcolor = -1
1581 fgcolor = -1
1582 if bgcolor is None:
1582 if bgcolor is None:
1583 bgcolor = -1
1583 bgcolor = -1
1584 if (fgcolor, bgcolor) in self.colorpairs:
1584 if (fgcolor, bgcolor) in self.colorpairs:
1585 colorpair = self.colorpairs[(fgcolor, bgcolor)]
1585 colorpair = self.colorpairs[(fgcolor, bgcolor)]
1586 else:
1586 else:
1587 pairindex = len(self.colorpairs) + 1
1587 pairindex = len(self.colorpairs) + 1
1588 if self.usecolor:
1588 if self.usecolor:
1589 curses.init_pair(pairindex, fgcolor, bgcolor)
1589 curses.init_pair(pairindex, fgcolor, bgcolor)
1590 colorpair = self.colorpairs[
1590 colorpair = self.colorpairs[
1591 (fgcolor, bgcolor)
1591 (fgcolor, bgcolor)
1592 ] = curses.color_pair(pairindex)
1592 ] = curses.color_pair(pairindex)
1593 if name is not None:
1593 if name is not None:
1594 self.colorpairnames[name] = curses.color_pair(pairindex)
1594 self.colorpairnames[name] = curses.color_pair(pairindex)
1595 else:
1595 else:
1596 cval = 0
1596 cval = 0
1597 if name is not None:
1597 if name is not None:
1598 if name == b'selected':
1598 if name == b'selected':
1599 cval = curses.A_REVERSE
1599 cval = curses.A_REVERSE
1600 self.colorpairnames[name] = cval
1600 self.colorpairnames[name] = cval
1601 colorpair = self.colorpairs[(fgcolor, bgcolor)] = cval
1601 colorpair = self.colorpairs[(fgcolor, bgcolor)] = cval
1602
1602
1603 # add attributes if possible
1603 # add attributes if possible
1604 if attrlist is None:
1604 if attrlist is None:
1605 attrlist = []
1605 attrlist = []
1606 if colorpair < 256:
1606 if colorpair < 256:
1607 # then it is safe to apply all attributes
1607 # then it is safe to apply all attributes
1608 for textattr in attrlist:
1608 for textattr in attrlist:
1609 colorpair |= textattr
1609 colorpair |= textattr
1610 else:
1610 else:
1611 # just apply a select few (safe?) attributes
1611 # just apply a select few (safe?) attributes
1612 for textattrib in (curses.A_UNDERLINE, curses.A_BOLD):
1612 for textattrib in (curses.A_UNDERLINE, curses.A_BOLD):
1613 if textattrib in attrlist:
1613 if textattrib in attrlist:
1614 colorpair |= textattrib
1614 colorpair |= textattrib
1615 return colorpair
1615 return colorpair
1616
1616
1617 def initcolorpair(self, *args, **kwargs):
1617 def initcolorpair(self, *args, **kwargs):
1618 """same as getcolorpair."""
1618 """same as getcolorpair."""
1619 self.getcolorpair(*args, **kwargs)
1619 self.getcolorpair(*args, **kwargs)
1620
1620
1621 def helpwindow(self):
1621 def helpwindow(self):
1622 """print a help window to the screen. exit after any keypress."""
1622 """print a help window to the screen. exit after any keypress."""
1623 helptext = _(
1623 helptext = _(
1624 """ [press any key to return to the patch-display]
1624 """ [press any key to return to the patch-display]
1625
1625
1626 The curses hunk selector allows you to interactively choose among the
1626 The curses hunk selector allows you to interactively choose among the
1627 changes you have made, and confirm only those changes you select for
1627 changes you have made, and confirm only those changes you select for
1628 further processing by the command you are running (such as commit,
1628 further processing by the command you are running (such as commit,
1629 shelve, or revert). After confirming the selected changes, the
1629 shelve, or revert). After confirming the selected changes, the
1630 unselected changes are still present in your working copy, so you can
1630 unselected changes are still present in your working copy, so you can
1631 use the hunk selector multiple times to split large changes into
1631 use the hunk selector multiple times to split large changes into
1632 smaller changesets. the following are valid keystrokes:
1632 smaller changesets. the following are valid keystrokes:
1633
1633
1634 x [space] : (un-)select item ([~]/[x] = partly/fully applied)
1634 x [space] : (un-)select item ([~]/[x] = partly/fully applied)
1635 [enter] : (un-)select item and go to next item of same type
1635 [enter] : (un-)select item and go to next item of same type
1636 A : (un-)select all items
1636 A : (un-)select all items
1637 X : (un-)select all items between current and most-recent
1637 X : (un-)select all items between current and most-recent
1638 up/down-arrow [k/j] : go to previous/next unfolded item
1638 up/down-arrow [k/j] : go to previous/next unfolded item
1639 pgup/pgdn [K/J] : go to previous/next item of same type
1639 pgup/pgdn [K/J] : go to previous/next item of same type
1640 right/left-arrow [l/h] : go to child item / parent item
1640 right/left-arrow [l/h] : go to child item / parent item
1641 shift-left-arrow [H] : go to parent header / fold selected header
1641 shift-left-arrow [H] : go to parent header / fold selected header
1642 g : go to the top
1642 g : go to the top
1643 G : go to the bottom
1643 G : go to the bottom
1644 f : fold / unfold item, hiding/revealing its children
1644 f : fold / unfold item, hiding/revealing its children
1645 F : fold / unfold parent item and all of its ancestors
1645 F : fold / unfold parent item and all of its ancestors
1646 ctrl-l : scroll the selected line to the top of the screen
1646 ctrl-l : scroll the selected line to the top of the screen
1647 m : edit / resume editing the commit message
1647 m : edit / resume editing the commit message
1648 e : edit the currently selected hunk
1648 e : edit the currently selected hunk
1649 a : toggle all selections
1649 a : toggle all selections
1650 c : confirm selected changes
1650 c : confirm selected changes
1651 r : review/edit and confirm selected changes
1651 r : review/edit and confirm selected changes
1652 q : quit without confirming (no changes will be made)
1652 q : quit without confirming (no changes will be made)
1653 ? : help (what you're currently reading)"""
1653 ? : help (what you're currently reading)"""
1654 )
1654 )
1655
1655
1656 helpwin = curses.newwin(self.yscreensize, 0, 0, 0)
1656 helpwin = curses.newwin(self.yscreensize, 0, 0, 0)
1657 helplines = helptext.split(b"\n")
1657 helplines = helptext.split(b"\n")
1658 helplines = helplines + [b" "] * (
1658 helplines = helplines + [b" "] * (
1659 self.yscreensize - self.numstatuslines - len(helplines) - 1
1659 self.yscreensize - self.numstatuslines - len(helplines) - 1
1660 )
1660 )
1661 try:
1661 try:
1662 for line in helplines:
1662 for line in helplines:
1663 self.printstring(helpwin, line, pairname=b"legend")
1663 self.printstring(helpwin, line, pairname=b"legend")
1664 except curses.error:
1664 except curses.error:
1665 pass
1665 pass
1666 helpwin.refresh()
1666 helpwin.refresh()
1667 try:
1667 try:
1668 with self.ui.timeblockedsection(b'crecord'):
1668 with self.ui.timeblockedsection(b'crecord'):
1669 helpwin.getkey()
1669 helpwin.getkey()
1670 except curses.error:
1670 except curses.error:
1671 pass
1671 pass
1672
1672
1673 def commitMessageWindow(self):
1673 def commitMessageWindow(self):
1674 """Create a temporary commit message editing window on the screen."""
1674 """Create a temporary commit message editing window on the screen."""
1675
1675
1676 curses.raw()
1676 curses.raw()
1677 curses.def_prog_mode()
1677 curses.def_prog_mode()
1678 curses.endwin()
1678 curses.endwin()
1679 self.commenttext = self.ui.edit(self.commenttext, self.ui.username())
1679 self.commenttext = self.ui.edit(self.commenttext, self.ui.username())
1680 curses.cbreak()
1680 curses.cbreak()
1681 self.stdscr.refresh()
1681 self.stdscr.refresh()
1682 self.stdscr.keypad(1) # allow arrow-keys to continue to function
1682 self.stdscr.keypad(1) # allow arrow-keys to continue to function
1683
1683
1684 def handlefirstlineevent(self):
1684 def handlefirstlineevent(self):
1685 """
1685 """
1686 Handle 'g' to navigate to the top most file in the ncurses window.
1686 Handle 'g' to navigate to the top most file in the ncurses window.
1687 """
1687 """
1688 self.currentselecteditem = self.headerlist[0]
1688 self.currentselecteditem = self.headerlist[0]
1689 currentitem = self.currentselecteditem
1689 currentitem = self.currentselecteditem
1690 # select the parent item recursively until we're at a header
1690 # select the parent item recursively until we're at a header
1691 while True:
1691 while True:
1692 nextitem = currentitem.parentitem()
1692 nextitem = currentitem.parentitem()
1693 if nextitem is None:
1693 if nextitem is None:
1694 break
1694 break
1695 else:
1695 else:
1696 currentitem = nextitem
1696 currentitem = nextitem
1697
1697
1698 self.currentselecteditem = currentitem
1698 self.currentselecteditem = currentitem
1699
1699
1700 def handlelastlineevent(self):
1700 def handlelastlineevent(self):
1701 """
1701 """
1702 Handle 'G' to navigate to the bottom most file/hunk/line depending
1702 Handle 'G' to navigate to the bottom most file/hunk/line depending
1703 on the whether the fold is active or not.
1703 on the whether the fold is active or not.
1704
1704
1705 If the bottom most file is folded, it navigates to that file and
1705 If the bottom most file is folded, it navigates to that file and
1706 stops there. If the bottom most file is unfolded, it navigates to
1706 stops there. If the bottom most file is unfolded, it navigates to
1707 the bottom most hunk in that file and stops there. If the bottom most
1707 the bottom most hunk in that file and stops there. If the bottom most
1708 hunk is unfolded, it navigates to the bottom most line in that hunk.
1708 hunk is unfolded, it navigates to the bottom most line in that hunk.
1709 """
1709 """
1710 currentitem = self.currentselecteditem
1710 currentitem = self.currentselecteditem
1711 nextitem = currentitem.nextitem()
1711 nextitem = currentitem.nextitem()
1712 # select the child item recursively until we're at a footer
1712 # select the child item recursively until we're at a footer
1713 while nextitem is not None:
1713 while nextitem is not None:
1714 nextitem = currentitem.nextitem()
1714 nextitem = currentitem.nextitem()
1715 if nextitem is None:
1715 if nextitem is None:
1716 break
1716 break
1717 else:
1717 else:
1718 currentitem = nextitem
1718 currentitem = nextitem
1719
1719
1720 self.currentselecteditem = currentitem
1720 self.currentselecteditem = currentitem
1721 self.recenterdisplayedarea()
1721 self.recenterdisplayedarea()
1722
1722
1723 def confirmationwindow(self, windowtext):
1723 def confirmationwindow(self, windowtext):
1724 """display an informational window, then wait for and return a
1724 """display an informational window, then wait for and return a
1725 keypress."""
1725 keypress."""
1726
1726
1727 confirmwin = curses.newwin(self.yscreensize, 0, 0, 0)
1727 confirmwin = curses.newwin(self.yscreensize, 0, 0, 0)
1728 try:
1728 try:
1729 lines = windowtext.split(b"\n")
1729 lines = windowtext.split(b"\n")
1730 for line in lines:
1730 for line in lines:
1731 self.printstring(confirmwin, line, pairname=b"selected")
1731 self.printstring(confirmwin, line, pairname=b"selected")
1732 except curses.error:
1732 except curses.error:
1733 pass
1733 pass
1734 self.stdscr.refresh()
1734 self.stdscr.refresh()
1735 confirmwin.refresh()
1735 confirmwin.refresh()
1736 try:
1736 try:
1737 with self.ui.timeblockedsection(b'crecord'):
1737 with self.ui.timeblockedsection(b'crecord'):
1738 response = chr(self.stdscr.getch())
1738 response = chr(self.stdscr.getch())
1739 except ValueError:
1739 except ValueError:
1740 response = None
1740 response = None
1741
1741
1742 return response
1742 return response
1743
1743
1744 def reviewcommit(self):
1744 def reviewcommit(self):
1745 """ask for 'y' to be pressed to confirm selected. return True if
1745 """ask for 'y' to be pressed to confirm selected. return True if
1746 confirmed."""
1746 confirmed."""
1747 confirmtext = _(
1747 confirmtext = _(
1748 """If you answer yes to the following, your currently chosen patch chunks
1748 """If you answer yes to the following, your currently chosen patch chunks
1749 will be loaded into an editor. To modify the patch, make the changes in your
1749 will be loaded into an editor. To modify the patch, make the changes in your
1750 editor and save. To accept the current patch as-is, close the editor without
1750 editor and save. To accept the current patch as-is, close the editor without
1751 saving.
1751 saving.
1752
1752
1753 note: don't add/remove lines unless you also modify the range information.
1753 note: don't add/remove lines unless you also modify the range information.
1754 failing to follow this rule will result in the commit aborting.
1754 failing to follow this rule will result in the commit aborting.
1755
1755
1756 are you sure you want to review/edit and confirm the selected changes [yn]?
1756 are you sure you want to review/edit and confirm the selected changes [yn]?
1757 """
1757 """
1758 )
1758 )
1759 with self.ui.timeblockedsection(b'crecord'):
1759 with self.ui.timeblockedsection(b'crecord'):
1760 response = self.confirmationwindow(confirmtext)
1760 response = self.confirmationwindow(confirmtext)
1761 if response is None:
1761 if response is None:
1762 response = "n"
1762 response = "n"
1763 if response.lower().startswith("y"):
1763 if response.lower().startswith("y"):
1764 return True
1764 return True
1765 else:
1765 else:
1766 return False
1766 return False
1767
1767
1768 def recenterdisplayedarea(self):
1768 def recenterdisplayedarea(self):
1769 """
1769 """
1770 once we scrolled with pg up pg down we can be pointing outside of the
1770 once we scrolled with pg up pg down we can be pointing outside of the
1771 display zone. we print the patch with towin=False to compute the
1771 display zone. we print the patch with towin=False to compute the
1772 location of the selected item even though it is outside of the displayed
1772 location of the selected item even though it is outside of the displayed
1773 zone and then update the scroll.
1773 zone and then update the scroll.
1774 """
1774 """
1775 self.printitem(towin=False)
1775 self.printitem(towin=False)
1776 self.updatescroll()
1776 self.updatescroll()
1777
1777
1778 def toggleedit(self, item=None, test=False):
1778 def toggleedit(self, item=None, test=False):
1779 """
1779 """
1780 edit the currently selected chunk
1780 edit the currently selected chunk
1781 """
1781 """
1782
1782
1783 def updateui(self):
1783 def updateui(self):
1784 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1784 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1785 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1785 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1786 self.updatescroll()
1786 self.updatescroll()
1787 self.stdscr.refresh()
1787 self.stdscr.refresh()
1788 self.statuswin.refresh()
1788 self.statuswin.refresh()
1789 self.stdscr.keypad(1)
1789 self.stdscr.keypad(1)
1790
1790
1791 def editpatchwitheditor(self, chunk):
1791 def editpatchwitheditor(self, chunk):
1792 if chunk is None:
1792 if chunk is None:
1793 self.ui.write(_(b'cannot edit patch for whole file'))
1793 self.ui.write(_(b'cannot edit patch for whole file'))
1794 self.ui.write(b"\n")
1794 self.ui.write(b"\n")
1795 return None
1795 return None
1796 if chunk.header.binary():
1796 if chunk.header.binary():
1797 self.ui.write(_(b'cannot edit patch for binary file'))
1797 self.ui.write(_(b'cannot edit patch for binary file'))
1798 self.ui.write(b"\n")
1798 self.ui.write(b"\n")
1799 return None
1799 return None
1800
1800
1801 # write the initial patch
1801 # write the initial patch
1802 patch = stringio()
1802 patch = stringio()
1803 patch.write(diffhelptext + hunkhelptext)
1803 patch.write(diffhelptext + hunkhelptext)
1804 chunk.header.write(patch)
1804 chunk.header.write(patch)
1805 chunk.write(patch)
1805 chunk.write(patch)
1806
1806
1807 # start the editor and wait for it to complete
1807 # start the editor and wait for it to complete
1808 try:
1808 try:
1809 patch = self.ui.edit(patch.getvalue(), b"", action=b"diff")
1809 patch = self.ui.edit(patch.getvalue(), b"", action=b"diff")
1810 except error.Abort as exc:
1810 except error.Abort as exc:
1811 self.errorstr = stringutil.forcebytestr(exc)
1811 self.errorstr = exc.message
1812 return None
1812 return None
1813 finally:
1813 finally:
1814 self.stdscr.clear()
1814 self.stdscr.clear()
1815 self.stdscr.refresh()
1815 self.stdscr.refresh()
1816
1816
1817 # remove comment lines
1817 # remove comment lines
1818 patch = [
1818 patch = [
1819 line + b'\n'
1819 line + b'\n'
1820 for line in patch.splitlines()
1820 for line in patch.splitlines()
1821 if not line.startswith(b'#')
1821 if not line.startswith(b'#')
1822 ]
1822 ]
1823 return patchmod.parsepatch(patch)
1823 return patchmod.parsepatch(patch)
1824
1824
1825 if item is None:
1825 if item is None:
1826 item = self.currentselecteditem
1826 item = self.currentselecteditem
1827 if isinstance(item, uiheader):
1827 if isinstance(item, uiheader):
1828 return
1828 return
1829 if isinstance(item, uihunkline):
1829 if isinstance(item, uihunkline):
1830 item = item.parentitem()
1830 item = item.parentitem()
1831 if not isinstance(item, uihunk):
1831 if not isinstance(item, uihunk):
1832 return
1832 return
1833
1833
1834 # To go back to that hunk or its replacement at the end of the edit
1834 # To go back to that hunk or its replacement at the end of the edit
1835 itemindex = item.parentitem().hunks.index(item)
1835 itemindex = item.parentitem().hunks.index(item)
1836
1836
1837 beforeadded, beforeremoved = item.added, item.removed
1837 beforeadded, beforeremoved = item.added, item.removed
1838 newpatches = editpatchwitheditor(self, item)
1838 newpatches = editpatchwitheditor(self, item)
1839 if newpatches is None:
1839 if newpatches is None:
1840 if not test:
1840 if not test:
1841 updateui(self)
1841 updateui(self)
1842 return
1842 return
1843 header = item.header
1843 header = item.header
1844 editedhunkindex = header.hunks.index(item)
1844 editedhunkindex = header.hunks.index(item)
1845 hunksbefore = header.hunks[:editedhunkindex]
1845 hunksbefore = header.hunks[:editedhunkindex]
1846 hunksafter = header.hunks[editedhunkindex + 1 :]
1846 hunksafter = header.hunks[editedhunkindex + 1 :]
1847 newpatchheader = newpatches[0]
1847 newpatchheader = newpatches[0]
1848 newhunks = [uihunk(h, header) for h in newpatchheader.hunks]
1848 newhunks = [uihunk(h, header) for h in newpatchheader.hunks]
1849 newadded = sum([h.added for h in newhunks])
1849 newadded = sum([h.added for h in newhunks])
1850 newremoved = sum([h.removed for h in newhunks])
1850 newremoved = sum([h.removed for h in newhunks])
1851 offset = (newadded - beforeadded) - (newremoved - beforeremoved)
1851 offset = (newadded - beforeadded) - (newremoved - beforeremoved)
1852
1852
1853 for h in hunksafter:
1853 for h in hunksafter:
1854 h.toline += offset
1854 h.toline += offset
1855 for h in newhunks:
1855 for h in newhunks:
1856 h.folded = False
1856 h.folded = False
1857 header.hunks = hunksbefore + newhunks + hunksafter
1857 header.hunks = hunksbefore + newhunks + hunksafter
1858 if self.emptypatch():
1858 if self.emptypatch():
1859 header.hunks = hunksbefore + [item] + hunksafter
1859 header.hunks = hunksbefore + [item] + hunksafter
1860 self.currentselecteditem = header
1860 self.currentselecteditem = header
1861 if len(header.hunks) > itemindex:
1861 if len(header.hunks) > itemindex:
1862 self.currentselecteditem = header.hunks[itemindex]
1862 self.currentselecteditem = header.hunks[itemindex]
1863
1863
1864 if not test:
1864 if not test:
1865 updateui(self)
1865 updateui(self)
1866
1866
1867 def emptypatch(self):
1867 def emptypatch(self):
1868 item = self.headerlist
1868 item = self.headerlist
1869 if not item:
1869 if not item:
1870 return True
1870 return True
1871 for header in item:
1871 for header in item:
1872 if header.hunks:
1872 if header.hunks:
1873 return False
1873 return False
1874 return True
1874 return True
1875
1875
1876 def handlekeypressed(self, keypressed, test=False):
1876 def handlekeypressed(self, keypressed, test=False):
1877 """
1877 """
1878 Perform actions based on pressed keys.
1878 Perform actions based on pressed keys.
1879
1879
1880 Return true to exit the main loop.
1880 Return true to exit the main loop.
1881 """
1881 """
1882 if keypressed in ["k", "KEY_UP"]:
1882 if keypressed in ["k", "KEY_UP"]:
1883 self.uparrowevent()
1883 self.uparrowevent()
1884 elif keypressed in ["K", "KEY_PPAGE"]:
1884 elif keypressed in ["K", "KEY_PPAGE"]:
1885 self.uparrowshiftevent()
1885 self.uparrowshiftevent()
1886 elif keypressed in ["j", "KEY_DOWN"]:
1886 elif keypressed in ["j", "KEY_DOWN"]:
1887 self.downarrowevent()
1887 self.downarrowevent()
1888 elif keypressed in ["J", "KEY_NPAGE"]:
1888 elif keypressed in ["J", "KEY_NPAGE"]:
1889 self.downarrowshiftevent()
1889 self.downarrowshiftevent()
1890 elif keypressed in ["l", "KEY_RIGHT"]:
1890 elif keypressed in ["l", "KEY_RIGHT"]:
1891 self.rightarrowevent()
1891 self.rightarrowevent()
1892 elif keypressed in ["h", "KEY_LEFT"]:
1892 elif keypressed in ["h", "KEY_LEFT"]:
1893 self.leftarrowevent()
1893 self.leftarrowevent()
1894 elif keypressed in ["H", "KEY_SLEFT"]:
1894 elif keypressed in ["H", "KEY_SLEFT"]:
1895 self.leftarrowshiftevent()
1895 self.leftarrowshiftevent()
1896 elif keypressed in ["q"]:
1896 elif keypressed in ["q"]:
1897 raise error.Abort(_(b'user quit'))
1897 raise error.Abort(_(b'user quit'))
1898 elif keypressed in ['a']:
1898 elif keypressed in ['a']:
1899 self.flipselections()
1899 self.flipselections()
1900 elif keypressed in ["c"]:
1900 elif keypressed in ["c"]:
1901 return True
1901 return True
1902 elif keypressed in ["r"]:
1902 elif keypressed in ["r"]:
1903 if self.reviewcommit():
1903 if self.reviewcommit():
1904 self.opts[b'review'] = True
1904 self.opts[b'review'] = True
1905 return True
1905 return True
1906 elif test and keypressed in ["R"]:
1906 elif test and keypressed in ["R"]:
1907 self.opts[b'review'] = True
1907 self.opts[b'review'] = True
1908 return True
1908 return True
1909 elif keypressed in [" ", "x"]:
1909 elif keypressed in [" ", "x"]:
1910 self.toggleapply()
1910 self.toggleapply()
1911 elif keypressed in ["\n", "KEY_ENTER"]:
1911 elif keypressed in ["\n", "KEY_ENTER"]:
1912 self.toggleapply()
1912 self.toggleapply()
1913 self.nextsametype(test=test)
1913 self.nextsametype(test=test)
1914 elif keypressed in ["X"]:
1914 elif keypressed in ["X"]:
1915 self.toggleallbetween()
1915 self.toggleallbetween()
1916 elif keypressed in ["A"]:
1916 elif keypressed in ["A"]:
1917 self.toggleall()
1917 self.toggleall()
1918 elif keypressed in ["e"]:
1918 elif keypressed in ["e"]:
1919 self.toggleedit(test=test)
1919 self.toggleedit(test=test)
1920 elif keypressed in ["f"]:
1920 elif keypressed in ["f"]:
1921 self.togglefolded()
1921 self.togglefolded()
1922 elif keypressed in ["F"]:
1922 elif keypressed in ["F"]:
1923 self.togglefolded(foldparent=True)
1923 self.togglefolded(foldparent=True)
1924 elif keypressed in ["m"]:
1924 elif keypressed in ["m"]:
1925 self.commitMessageWindow()
1925 self.commitMessageWindow()
1926 elif keypressed in ["g", "KEY_HOME"]:
1926 elif keypressed in ["g", "KEY_HOME"]:
1927 self.handlefirstlineevent()
1927 self.handlefirstlineevent()
1928 elif keypressed in ["G", "KEY_END"]:
1928 elif keypressed in ["G", "KEY_END"]:
1929 self.handlelastlineevent()
1929 self.handlelastlineevent()
1930 elif keypressed in ["?"]:
1930 elif keypressed in ["?"]:
1931 self.helpwindow()
1931 self.helpwindow()
1932 self.stdscr.clear()
1932 self.stdscr.clear()
1933 self.stdscr.refresh()
1933 self.stdscr.refresh()
1934 elif keypressed in [curses.ascii.ctrl("L")]:
1934 elif keypressed in [curses.ascii.ctrl("L")]:
1935 # scroll the current line to the top of the screen, and redraw
1935 # scroll the current line to the top of the screen, and redraw
1936 # everything
1936 # everything
1937 self.scrolllines(self.selecteditemstartline)
1937 self.scrolllines(self.selecteditemstartline)
1938 self.stdscr.clear()
1938 self.stdscr.clear()
1939 self.stdscr.refresh()
1939 self.stdscr.refresh()
1940
1940
1941 def main(self, stdscr):
1941 def main(self, stdscr):
1942 """
1942 """
1943 method to be wrapped by curses.wrapper() for selecting chunks.
1943 method to be wrapped by curses.wrapper() for selecting chunks.
1944 """
1944 """
1945
1945
1946 origsigwinch = sentinel = object()
1946 origsigwinch = sentinel = object()
1947 if util.safehasattr(signal, b'SIGWINCH'):
1947 if util.safehasattr(signal, b'SIGWINCH'):
1948 origsigwinch = signal.signal(signal.SIGWINCH, self.sigwinchhandler)
1948 origsigwinch = signal.signal(signal.SIGWINCH, self.sigwinchhandler)
1949 try:
1949 try:
1950 return self._main(stdscr)
1950 return self._main(stdscr)
1951 finally:
1951 finally:
1952 if origsigwinch is not sentinel:
1952 if origsigwinch is not sentinel:
1953 signal.signal(signal.SIGWINCH, origsigwinch)
1953 signal.signal(signal.SIGWINCH, origsigwinch)
1954
1954
1955 def _main(self, stdscr):
1955 def _main(self, stdscr):
1956 self.stdscr = stdscr
1956 self.stdscr = stdscr
1957 # error during initialization, cannot be printed in the curses
1957 # error during initialization, cannot be printed in the curses
1958 # interface, it should be printed by the calling code
1958 # interface, it should be printed by the calling code
1959 self.initexc = None
1959 self.initexc = None
1960 self.yscreensize, self.xscreensize = self.stdscr.getmaxyx()
1960 self.yscreensize, self.xscreensize = self.stdscr.getmaxyx()
1961
1961
1962 curses.start_color()
1962 curses.start_color()
1963 try:
1963 try:
1964 curses.use_default_colors()
1964 curses.use_default_colors()
1965 except curses.error:
1965 except curses.error:
1966 self.usecolor = False
1966 self.usecolor = False
1967
1967
1968 # In some situations we may have some cruft left on the "alternate
1968 # In some situations we may have some cruft left on the "alternate
1969 # screen" from another program (or previous iterations of ourself), and
1969 # screen" from another program (or previous iterations of ourself), and
1970 # we won't clear it if the scroll region is small enough to comfortably
1970 # we won't clear it if the scroll region is small enough to comfortably
1971 # fit on the terminal.
1971 # fit on the terminal.
1972 self.stdscr.clear()
1972 self.stdscr.clear()
1973
1973
1974 # don't display the cursor
1974 # don't display the cursor
1975 try:
1975 try:
1976 curses.curs_set(0)
1976 curses.curs_set(0)
1977 except curses.error:
1977 except curses.error:
1978 pass
1978 pass
1979
1979
1980 # available colors: black, blue, cyan, green, magenta, white, yellow
1980 # available colors: black, blue, cyan, green, magenta, white, yellow
1981 # init_pair(color_id, foreground_color, background_color)
1981 # init_pair(color_id, foreground_color, background_color)
1982 self.initcolorpair(None, None, name=b"normal")
1982 self.initcolorpair(None, None, name=b"normal")
1983 self.initcolorpair(
1983 self.initcolorpair(
1984 curses.COLOR_WHITE, curses.COLOR_MAGENTA, name=b"selected"
1984 curses.COLOR_WHITE, curses.COLOR_MAGENTA, name=b"selected"
1985 )
1985 )
1986 self.initcolorpair(curses.COLOR_RED, None, name=b"deletion")
1986 self.initcolorpair(curses.COLOR_RED, None, name=b"deletion")
1987 self.initcolorpair(curses.COLOR_GREEN, None, name=b"addition")
1987 self.initcolorpair(curses.COLOR_GREEN, None, name=b"addition")
1988 self.initcolorpair(
1988 self.initcolorpair(
1989 curses.COLOR_WHITE, curses.COLOR_BLUE, name=b"legend"
1989 curses.COLOR_WHITE, curses.COLOR_BLUE, name=b"legend"
1990 )
1990 )
1991 # newwin([height, width,] begin_y, begin_x)
1991 # newwin([height, width,] begin_y, begin_x)
1992 self.statuswin = curses.newwin(self.numstatuslines, 0, 0, 0)
1992 self.statuswin = curses.newwin(self.numstatuslines, 0, 0, 0)
1993 self.statuswin.keypad(1) # interpret arrow-key, etc. esc sequences
1993 self.statuswin.keypad(1) # interpret arrow-key, etc. esc sequences
1994
1994
1995 # figure out how much space to allocate for the chunk-pad which is
1995 # figure out how much space to allocate for the chunk-pad which is
1996 # used for displaying the patch
1996 # used for displaying the patch
1997
1997
1998 # stupid hack to prevent getnumlinesdisplayed from failing
1998 # stupid hack to prevent getnumlinesdisplayed from failing
1999 self.chunkpad = curses.newpad(1, self.xscreensize)
1999 self.chunkpad = curses.newpad(1, self.xscreensize)
2000
2000
2001 # add 1 so to account for last line text reaching end of line
2001 # add 1 so to account for last line text reaching end of line
2002 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
2002 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
2003
2003
2004 try:
2004 try:
2005 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
2005 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
2006 except curses.error:
2006 except curses.error:
2007 self.initexc = fallbackerror(
2007 self.initexc = fallbackerror(
2008 _(b'this diff is too large to be displayed')
2008 _(b'this diff is too large to be displayed')
2009 )
2009 )
2010 return
2010 return
2011 # initialize selecteditemendline (initial start-line is 0)
2011 # initialize selecteditemendline (initial start-line is 0)
2012 self.selecteditemendline = self.getnumlinesdisplayed(
2012 self.selecteditemendline = self.getnumlinesdisplayed(
2013 self.currentselecteditem, recursechildren=False
2013 self.currentselecteditem, recursechildren=False
2014 )
2014 )
2015
2015
2016 while True:
2016 while True:
2017 self.updatescreen()
2017 self.updatescreen()
2018 try:
2018 try:
2019 with self.ui.timeblockedsection(b'crecord'):
2019 with self.ui.timeblockedsection(b'crecord'):
2020 keypressed = self.statuswin.getkey()
2020 keypressed = self.statuswin.getkey()
2021 if self.errorstr is not None:
2021 if self.errorstr is not None:
2022 self.errorstr = None
2022 self.errorstr = None
2023 continue
2023 continue
2024 except curses.error:
2024 except curses.error:
2025 keypressed = b"foobar"
2025 keypressed = b"foobar"
2026 if self.handlekeypressed(keypressed):
2026 if self.handlekeypressed(keypressed):
2027 break
2027 break
2028
2028
2029 if self.commenttext != b"":
2029 if self.commenttext != b"":
2030 whitespaceremoved = re.sub(
2030 whitespaceremoved = re.sub(
2031 br"(?m)^\s.*(\n|$)", b"", self.commenttext
2031 br"(?m)^\s.*(\n|$)", b"", self.commenttext
2032 )
2032 )
2033 if whitespaceremoved != b"":
2033 if whitespaceremoved != b"":
2034 self.opts[b'message'] = self.commenttext
2034 self.opts[b'message'] = self.commenttext
@@ -1,4578 +1,4578
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import difflib
12 import difflib
13 import errno
13 import errno
14 import glob
14 import glob
15 import operator
15 import operator
16 import os
16 import os
17 import platform
17 import platform
18 import random
18 import random
19 import re
19 import re
20 import socket
20 import socket
21 import ssl
21 import ssl
22 import stat
22 import stat
23 import string
23 import string
24 import subprocess
24 import subprocess
25 import sys
25 import sys
26 import time
26 import time
27
27
28 from .i18n import _
28 from .i18n import _
29 from .node import (
29 from .node import (
30 bin,
30 bin,
31 hex,
31 hex,
32 nullid,
32 nullid,
33 nullrev,
33 nullrev,
34 short,
34 short,
35 )
35 )
36 from .pycompat import (
36 from .pycompat import (
37 getattr,
37 getattr,
38 open,
38 open,
39 )
39 )
40 from . import (
40 from . import (
41 bundle2,
41 bundle2,
42 bundlerepo,
42 bundlerepo,
43 changegroup,
43 changegroup,
44 cmdutil,
44 cmdutil,
45 color,
45 color,
46 context,
46 context,
47 copies,
47 copies,
48 dagparser,
48 dagparser,
49 encoding,
49 encoding,
50 error,
50 error,
51 exchange,
51 exchange,
52 extensions,
52 extensions,
53 filemerge,
53 filemerge,
54 filesetlang,
54 filesetlang,
55 formatter,
55 formatter,
56 hg,
56 hg,
57 httppeer,
57 httppeer,
58 localrepo,
58 localrepo,
59 lock as lockmod,
59 lock as lockmod,
60 logcmdutil,
60 logcmdutil,
61 mergestate as mergestatemod,
61 mergestate as mergestatemod,
62 metadata,
62 metadata,
63 obsolete,
63 obsolete,
64 obsutil,
64 obsutil,
65 pathutil,
65 pathutil,
66 phases,
66 phases,
67 policy,
67 policy,
68 pvec,
68 pvec,
69 pycompat,
69 pycompat,
70 registrar,
70 registrar,
71 repair,
71 repair,
72 revlog,
72 revlog,
73 revset,
73 revset,
74 revsetlang,
74 revsetlang,
75 scmutil,
75 scmutil,
76 setdiscovery,
76 setdiscovery,
77 simplemerge,
77 simplemerge,
78 sshpeer,
78 sshpeer,
79 sslutil,
79 sslutil,
80 streamclone,
80 streamclone,
81 tags as tagsmod,
81 tags as tagsmod,
82 templater,
82 templater,
83 treediscovery,
83 treediscovery,
84 upgrade,
84 upgrade,
85 url as urlmod,
85 url as urlmod,
86 util,
86 util,
87 vfs as vfsmod,
87 vfs as vfsmod,
88 wireprotoframing,
88 wireprotoframing,
89 wireprotoserver,
89 wireprotoserver,
90 wireprotov2peer,
90 wireprotov2peer,
91 )
91 )
92 from .utils import (
92 from .utils import (
93 cborutil,
93 cborutil,
94 compression,
94 compression,
95 dateutil,
95 dateutil,
96 procutil,
96 procutil,
97 stringutil,
97 stringutil,
98 )
98 )
99
99
100 from .revlogutils import (
100 from .revlogutils import (
101 deltas as deltautil,
101 deltas as deltautil,
102 nodemap,
102 nodemap,
103 sidedata,
103 sidedata,
104 )
104 )
105
105
106 release = lockmod.release
106 release = lockmod.release
107
107
108 command = registrar.command()
108 command = registrar.command()
109
109
110
110
@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs not in (2, 3):
        raise error.Abort(_(b'either two or three arguments required'))
    if nargs == 3:
        # An explicit index file was given: open that revlog directly,
        # without going through any repository.
        index, rev1, rev2 = args
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = rlog.lookup
    else:
        # Two arguments: resolve the revisions in the current repository's
        # changelog, which therefore must exist.
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        rlog = repo.changelog
        lookup = repo.lookup
    anc = rlog.ancestor(lookup(rev1), lookup(rev2))
    ui.write(b'%d:%s\n' % (rlog.rev(anc), hex(anc)))
130
130
131
131
@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
    """attempt to trigger an antivirus scanner to see if one is active"""
    # Use a bytes literal for the filename: vfs paths are bytes throughout
    # Mercurial, and the str literal used previously was inconsistent with
    # the rest of this module. Hoisted into one name so the open/unlink
    # pair cannot drift apart.
    fn = b'eicar-test-file.com'
    with repo.cachevfs.open(fn, b'wb') as f:
        f.write(
            util.b85decode(
                # This is a base85-armored version of the EICAR test file. See
                # https://en.wikipedia.org/wiki/EICAR_test_file for details.
                b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
                b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
            )
        )
    # Give an AV engine time to scan the file.
    time.sleep(2)
    util.unlink(repo.cachevfs.join(fn))
147
147
148
148
@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # Open the bundle file (possibly via a URL helper), parse its header,
    # and replay its contents into the local repository.
    fh = hg.openpath(ui, fname)
    bundle = exchange.readbundle(ui, fh, fname)
    bundle.apply(repo)
155
155
156
156
@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

     - "+n" is a linear run of n nodes based on the current default parent
     - "." is a single node based on the current default parent
     - "$" resets the default parent to null (implied at the start);
           otherwise the default parent is always the last node created
     - "<p" sets the default parent to the backref p
     - "*p" is a fork at parent p, which is a backref
     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
     - "/p2" is a merge of the preceding node and p2
     - ":tag" defines a local tag for the preceding node
     - "@branch" sets the named branch for subsequent nodes
     - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

     - a number n, which references the node curr-n, where curr is the current
       node, or
     - the name of a local tag you placed earlier using ":tag", or
     - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    # This command only makes sense on a brand-new repository.
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_(b'repository is not empty'))

    # determine number of revs in DAG (first parse pass, for the progress
    # bar total and for sizing the mergeable file below)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [
            b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
        ]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        at = -1  # id of the last node committed; -1 before the first one
        atbranch = b'default'  # branch applied to subsequently created nodes
        nodeids = []  # node hash of each committed rev, indexed by its id
        id = 0
        progress.update(id)
        # Second parse pass: actually create the commits/tags/branches.
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # Merge node: three-way merge the file contents of
                        # the two parents against their common ancestor.
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    # Touch this rev's dedicated line so every rev changes
                    # the file in a mergeable way.
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    # Single file fully rewritten by every rev.
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    # One brand-new file per rev ...
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        # ... and on merges, carry over p2's "nf*" files so
                        # they survive the merge.
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    # memctx callback: serve the contents prepared above.
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                # Translate the DAG's backrefs into parent node hashes.
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                # Local tag element: remember it; written out after the loop.
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                # Branch element: applies to all subsequent nodes.
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))
332
332
333
333
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """dump the contents of the changegroup unbundler `gen`

    With `all`, every delta chunk of the changelog, manifest and each
    filelog is printed; otherwise only the changelog node ids are listed.
    Output is prefixed with `indent` spaces (used when nested inside
    bundle2 part output).
    """
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            # Print the section title, then one line per delta chunk.
            # NOTE: deltaiter() consumes the stream, so each section is
            # read exactly once, in bundle order.
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        # An empty header dict marks the end of the filelog sections.
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        # Terse mode only knows how to walk a plain changegroup stream.
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))
373
373
374
374
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in the bundle2 part `part`

    Output is prefixed with `indent` spaces.
    """
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # Don't abort on a marker format we can't parse: report it and
        # let the caller continue with the remaining parts.
        msg = b"%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = b"%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter(b'debugobsolete', opts)
        # Sort so output is stable regardless of on-wire marker order.
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()
397
397
398
398
def _debugphaseheads(ui, data, indent=0):
    """display the phase heads contained in 'data'

    `data` is the binary payload of a bundle2 'phase-heads' part. One
    line per head is printed, prefixed with `indent` spaces.
    """
    indent_string = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
407
407
408
408
def _quasirepr(thing):
    """Return a stable, repr()-like byte string rendering of *thing*.

    Mappings are rendered with their keys in sorted order so the output
    does not depend on insertion order; everything else falls back to
    its ordinary repr().
    """
    mappingtypes = (dict, util.sortdict, collections.OrderedDict)
    if not isinstance(thing, mappingtypes):
        return pycompat.bytestr(repr(thing))
    pairs = [b'%s: %s' % (key, thing[key]) for key in sorted(thing)]
    return b'{%s}' % b', '.join(pairs)
415
415
416
416
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
    # Optional filter: only show parts whose type was requested via
    # --part-type (empty list means "show everything").
    parttypes = opts.get('part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = b'%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        # For the part types we know how to decode, dump their payload
        # too (indented under the part header), unless --quiet.
        if part.type == b'changegroup':
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == b'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == b'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
439
439
440
440
@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # --spec short-circuits: print the bundlespec and nothing else.
            spec = exchange.getbundlespec(ui, f)
            ui.write(b'%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        # Dispatch on bundle format: bundle2 containers get the part-aware
        # dumper, plain changegroups the simple one.
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
463
463
464
464
@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    # Plain wire-protocol capabilities first, sorted for stable output.
    ui.writenoi18n(b'Main capabilities:\n')
    for cap in sorted(peer.capabilities()):
        ui.write(b' %s\n' % cap)
    # Then the bundle2 capability namespace, if the peer advertises one.
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.writenoi18n(b'Bundle2 capabilities:\n')
        for capname, capvalues in sorted(pycompat.iteritems(b2caps)):
            ui.write(b' %s\n' % capname)
            for value in capvalues:
                ui.write(b' %s\n' % value)
481
481
482
482
@command(b'debugchangedfiles', [], b'REV')
def debugchangedfiles(ui, repo, rev):
    """list the stored files changes for a revision"""
    ctx = scmutil.revsingle(repo, rev, None)
    sd = repo.changelog.sidedata(ctx.rev())
    # Nothing to show if the revision carries no files sidedata block.
    if sd.get(sidedata.SD_FILES) is None:
        return
    files = metadata.decode_files_sidedata(sd)
    template = b"%-8s %2s: %s, %s;\n"
    for fname in sorted(files.touched):
        # Classify the change; the first matching category wins.
        if fname in files.added:
            action = b"added"
        elif fname in files.removed:
            action = b"removed"
        elif fname in files.merged:
            action = b"merged"
        elif fname in files.salvaged:
            action = b"salvaged"
        else:
            action = b"touched"

        # Annotate copies with the parent they were copied relative to.
        copy_parent = b""
        copy_source = b""
        if fname in files.copied_from_p1:
            copy_parent, copy_source = b"p1", files.copied_from_p1[fname]
        elif fname in files.copied_from_p2:
            copy_parent, copy_source = b"p2", files.copied_from_p2[fname]

        ui.write(template % (action, copy_parent, fname, copy_source))
515
515
516
516
@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    p1, p2 = repo.dirstate.parents()
    m1 = repo[p1].manifest()
    m2 = repo[p2].manifest()
    nerrors = 0
    # Pass 1: every file tracked in the dirstate must be consistent with
    # the parent manifests, given its dirstate status letter.
    for fname in repo.dirstate:
        state = repo.dirstate[fname]
        if state in b"nr" and fname not in m1:
            ui.warn(
                _(b"%s in state %s, but not in manifest1\n") % (fname, state)
            )
            nerrors += 1
        if state in b"a" and fname in m1:
            ui.warn(
                _(b"%s in state %s, but also in manifest1\n") % (fname, state)
            )
            nerrors += 1
        if state in b"m" and fname not in m1 and fname not in m2:
            ui.warn(
                _(b"%s in state %s, but not in either manifest\n")
                % (fname, state)
            )
            nerrors += 1
    # Pass 2: every file in the first parent's manifest must be tracked
    # with an appropriate state.
    for fname in m1:
        state = repo.dirstate[fname]
        if state not in b"nrm":
            ui.warn(
                _(b"%s in manifest1, but listed as state %s") % (fname, state)
            )
            nerrors += 1
    if nerrors:
        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
545
545
546
546
@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    # Always report the active color mode, then dispatch to the styles
    # listing (--style) or the colors listing (default).
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
559
559
560
560
def _debugdisplaycolor(ui):
    """Print every available color/effect name, each rendered in itself."""
    # Work on a copy so the temporary style table built below does not
    # leak into the caller's ui.
    ui = ui.copy()
    ui._styles.clear()
    # Label every active effect with itself, so printing the name also
    # demonstrates the effect.
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # In terminfo mode, also surface user-defined color.*/terminfo.*
        # entries (keyed without their section prefix).
        for k, v in ui.configitems(b'color'):
            if k.startswith(b'color.'):
                ui._styles[k] = k[6:]
            elif k.startswith(b'terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_(b'available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(b'%s\n' % colorname, label=label)
577
577
578
578
def _debugdisplaystyle(ui):
    """Print every configured style label and the effects it maps to."""
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    # Pad after each label so the effect lists line up in one column.
    width = max(len(label) for label in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            ui.write(b': ')
            ui.write(b' ' * (max(0, width - len(label))))
            rendered = [ui.label(e, e) for e in effects.split()]
            ui.write(b', '.join(rendered))
        ui.write(b'\n')
592
592
593
593
@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        msg = _(
            b'(warning: stream clone bundle will contain secret '
            b'revisions)\n'
        )
        ui.warn(msg)

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    reqstr = b', '.join(sorted(requirements))
    ui.write(_(b'bundle requirements: %s\n') % reqstr)
615
615
616
616
@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # Explicit index file given: dump that revlog's DAG.
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            # Emit ('n', (rev, parents)) for every revision; revisions the
            # user listed on the command line additionally get an
            # ('l', (rev, "rN")) label event.
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        # No file: walk the repository changelog instead.
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            # Map each tagged revision number to its list of tag names.
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    # Emit an annotation event whenever the branch name
                    # changes between consecutive revisions.
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    # Render the event stream as dagtext lines.
    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
    ui.write(b"\n")
686
686
687
687
@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    # With -c/-m/--dir the single positional argument is the revision,
    # not a file path, so shuffle the arguments accordingly.
    storageselected = any(
        opts.get(flag) for flag in (b'changelog', b'manifest', b'dir')
    )
    if storageselected:
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        ui.write(r.rawdata(r.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
703
703
704
704
@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # --extended allows the more permissive set of date formats.
    if opts["extended"]:
        parsed = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.writenoi18n(b"internal: %d %d\n" % parsed)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(parsed))
    if range:
        matcher = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % matcher(parsed[0]))
723
723
724
724
@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # Classify how this revision's delta was computed and measure its
        # full delta chain.  Index entry layout: e[1]=compressed size,
        # e[2]=uncompressed size, e[3]=delta base rev, e[5]/e[6]=parent revs.
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            if e[3] == e[5]:
                deltatype = b'p1'
            elif e[3] == e[6]:
                deltatype = b'p2'
            elif e[3] == rev - 1:
                deltatype = b'prev'
            elif e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'other'
        else:
            # Without generaldelta a delta is always against the previous
            # revision (or the revision is a full snapshot "base").
            if e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter(b'debugdeltachain', opts)

    fm.plain(
        b' rev chain# chainlen prev delta '
        b'size rawsize chainsize ratio lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b' readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    # Delta chains are numbered in the order their base revision is first
    # encountered.
    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        # On-disk distance from the chain base to the end of this revision.
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            # Simulate a sparse read of the chain and collect statistics on
            # how much data would actually be fetched from disk.
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()
905
905
906
906
@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    # --nodates is the deprecated spelling of --no-dates.
    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:
        # sort by mtime, then by filename
        keyfunc = lambda item: (item[1][3], item[0])
    else:
        keyfunc = None  # sort by filename
    entries = sorted(pycompat.iteritems(repo.dirstate), key=keyfunc)
    for file_, ent in entries:
        state, mode, size, mtime = ent[0], ent[1], ent[2], ent[3]
        if mtime == -1:
            timestr = b'unset '
        elif nodates:
            timestr = b'set '
        else:
            localized = time.strftime(
                "%Y-%m-%d %H:%M:%S ", time.localtime(mtime)
            )
            timestr = encoding.strtolocal(localized)
        if mode & 0o20000:
            modestr = b'lnk'
        else:
            modestr = b'%3o' % (mode & 0o777 & ~util.umask)
        ui.write(b"%c %s %10d %s%s\n" % (state, modestr, size, timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
950
950
951
951
@command(
    b'debugdiscovery',
    [
        (b'', b'old', None, _(b'use old-style discovery')),
        (
            b'',
            b'nonheads',
            None,
            _(b'use old-style discovery with non-heads included'),
        ),
        (b'', b'rev', [], b'restrict discovery to this set of revs'),
        (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
    ]
    + cmdutil.remoteopts,
    _(b'[--rev REV] [OTHER]'),
)
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(int(opts[b'seed']))

    if opts.get(b'old'):
        # Legacy tree-walking discovery protocol.

        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, b'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(
                repo, remote, force=True
            )
            common = set(common)
            if not opts.get(b'nonheads'):
                ui.writenoi18n(
                    b"unpruned common: %s\n"
                    % b" ".join(sorted(short(n) for n in common))
                )

            # Reduce the common set to its heads, as node ids.
            clnode = repo.changelog.node
            common = repo.revs(b'heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds

    else:
        # Modern set-based discovery, optionally restricted by --rev.

        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(
                ui, repo, remote, ancestorsof=nodes
            )
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts[b'rev']
    # Time just the discovery run itself.
    with util.timedcm('debug-discovery') as t:
        common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    common = set(common)
    rheads = set(hds)
    lheads = set(repo.heads())

    data = {}
    data[b'elapsed'] = t.elapsed
    data[b'nb-common'] = len(common)
    data[b'nb-common-local'] = len(common & lheads)
    data[b'nb-common-remote'] = len(common & rheads)
    data[b'nb-common-both'] = len(common & rheads & lheads)
    data[b'nb-local'] = len(lheads)
    data[b'nb-local-missing'] = data[b'nb-local'] - data[b'nb-common-local']
    data[b'nb-remote'] = len(rheads)
    data[b'nb-remote-unknown'] = data[b'nb-remote'] - data[b'nb-common-remote']
    data[b'nb-revs'] = len(repo.revs(b'all()'))
    data[b'nb-revs-common'] = len(repo.revs(b'::%ln', common))
    data[b'nb-revs-missing'] = data[b'nb-revs'] - data[b'nb-revs-common']

    # display discovery summary
    ui.writenoi18n(b"elapsed time: %(elapsed)f seconds\n" % data)
    ui.writenoi18n(b"heads summary:\n")
    ui.writenoi18n(b" total common heads: %(nb-common)9d\n" % data)
    ui.writenoi18n(b" also local heads: %(nb-common-local)9d\n" % data)
    ui.writenoi18n(b" also remote heads: %(nb-common-remote)9d\n" % data)
    ui.writenoi18n(b" both: %(nb-common-both)9d\n" % data)
    ui.writenoi18n(b" local heads: %(nb-local)9d\n" % data)
    ui.writenoi18n(b" common: %(nb-common-local)9d\n" % data)
    ui.writenoi18n(b" missing: %(nb-local-missing)9d\n" % data)
    ui.writenoi18n(b" remote heads: %(nb-remote)9d\n" % data)
    ui.writenoi18n(b" common: %(nb-common-remote)9d\n" % data)
    ui.writenoi18n(b" unknown: %(nb-remote-unknown)9d\n" % data)
    ui.writenoi18n(b"local changesets: %(nb-revs)9d\n" % data)
    ui.writenoi18n(b" common: %(nb-revs-common)9d\n" % data)
    ui.writenoi18n(b" missing: %(nb-revs-missing)9d\n" % data)

    if ui.verbose:
        ui.writenoi18n(
            b"common heads: %s\n" % b" ".join(sorted(short(n) for n in common))
        )
1056
1056
1057
1057
# Buffer size (4 KiB) used when streaming a downloaded resource.
_chunksize = 4 << 10
1059
1059
1060
1060
@command(
    b'debugdownload', [(b'o', b'output', b'', _(b'path')),], optionalrepo=True
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config
    """
    # Fix: the handle returned by urlmod.open was previously never closed,
    # leaking the underlying connection/file; it also leaked when opening
    # the output file or writing a chunk raised.  Wrap everything that can
    # fail after the open in try/finally so the handle is always released.
    fh = urlmod.open(ui, url, output)
    try:
        # Write to --output when given, otherwise stream to the ui.
        dest = ui
        if output:
            dest = open(output, b"wb", _chunksize)
        try:
            # Copy in fixed-size chunks so arbitrarily large resources do
            # not need to fit in memory.
            data = fh.read(_chunksize)
            while data:
                dest.write(data)
                data = fh.read(_chunksize)
        finally:
            if output:
                dest.close()
    finally:
        fh.close()
1080
1080
1081
1081
@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = None

        if util.safehasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            # Frozen (oxidized) build: modules live inside the executable.
            extsource = pycompat.sysexecutable
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            # 'testedwith' is a whitespace-separated list of hg versions.
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            # Default verbosity: append a compatibility note to the name.
            fm.write(b'name', b'%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b' location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
        # Always expose the flag to templates, even when not printed.
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b' tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b' bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()
1143
1143
1144
1144
1145 @command(
1145 @command(
1146 b'debugfileset',
1146 b'debugfileset',
1147 [
1147 [
1148 (
1148 (
1149 b'r',
1149 b'r',
1150 b'rev',
1150 b'rev',
1151 b'',
1151 b'',
1152 _(b'apply the filespec on this revision'),
1152 _(b'apply the filespec on this revision'),
1153 _(b'REV'),
1153 _(b'REV'),
1154 ),
1154 ),
1155 (
1155 (
1156 b'',
1156 b'',
1157 b'all-files',
1157 b'all-files',
1158 False,
1158 False,
1159 _(b'test files from all revisions and working directory'),
1159 _(b'test files from all revisions and working directory'),
1160 ),
1160 ),
1161 (
1161 (
1162 b's',
1162 b's',
1163 b'show-matcher',
1163 b'show-matcher',
1164 None,
1164 None,
1165 _(b'print internal representation of matcher'),
1165 _(b'print internal representation of matcher'),
1166 ),
1166 ),
1167 (
1167 (
1168 b'p',
1168 b'p',
1169 b'show-stage',
1169 b'show-stage',
1170 [],
1170 [],
1171 _(b'print parsed tree at the given stage'),
1171 _(b'print parsed tree at the given stage'),
1172 _(b'NAME'),
1172 _(b'NAME'),
1173 ),
1173 ),
1174 ],
1174 ],
1175 _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
1175 _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
1176 )
1176 )
1177 def debugfileset(ui, repo, expr, **opts):
1177 def debugfileset(ui, repo, expr, **opts):
1178 '''parse and apply a fileset specification'''
1178 '''parse and apply a fileset specification'''
1179 from . import fileset
1179 from . import fileset
1180
1180
1181 fileset.symbols # force import of fileset so we have predicates to optimize
1181 fileset.symbols # force import of fileset so we have predicates to optimize
1182 opts = pycompat.byteskwargs(opts)
1182 opts = pycompat.byteskwargs(opts)
1183 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
1183 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
1184
1184
1185 stages = [
1185 stages = [
1186 (b'parsed', pycompat.identity),
1186 (b'parsed', pycompat.identity),
1187 (b'analyzed', filesetlang.analyze),
1187 (b'analyzed', filesetlang.analyze),
1188 (b'optimized', filesetlang.optimize),
1188 (b'optimized', filesetlang.optimize),
1189 ]
1189 ]
1190 stagenames = {n for n, f in stages}
1190 stagenames = {n for n, f in stages}
1191
1191
1192 showalways = set()
1192 showalways = set()
1193 if ui.verbose and not opts[b'show_stage']:
1193 if ui.verbose and not opts[b'show_stage']:
1194 # show parsed tree by --verbose (deprecated)
1194 # show parsed tree by --verbose (deprecated)
1195 showalways.add(b'parsed')
1195 showalways.add(b'parsed')
1196 if opts[b'show_stage'] == [b'all']:
1196 if opts[b'show_stage'] == [b'all']:
1197 showalways.update(stagenames)
1197 showalways.update(stagenames)
1198 else:
1198 else:
1199 for n in opts[b'show_stage']:
1199 for n in opts[b'show_stage']:
1200 if n not in stagenames:
1200 if n not in stagenames:
1201 raise error.Abort(_(b'invalid stage name: %s') % n)
1201 raise error.Abort(_(b'invalid stage name: %s') % n)
1202 showalways.update(opts[b'show_stage'])
1202 showalways.update(opts[b'show_stage'])
1203
1203
1204 tree = filesetlang.parse(expr)
1204 tree = filesetlang.parse(expr)
1205 for n, f in stages:
1205 for n, f in stages:
1206 tree = f(tree)
1206 tree = f(tree)
1207 if n in showalways:
1207 if n in showalways:
1208 if opts[b'show_stage'] or n != b'parsed':
1208 if opts[b'show_stage'] or n != b'parsed':
1209 ui.write(b"* %s:\n" % n)
1209 ui.write(b"* %s:\n" % n)
1210 ui.write(filesetlang.prettyformat(tree), b"\n")
1210 ui.write(filesetlang.prettyformat(tree), b"\n")
1211
1211
1212 files = set()
1212 files = set()
1213 if opts[b'all_files']:
1213 if opts[b'all_files']:
1214 for r in repo:
1214 for r in repo:
1215 c = repo[r]
1215 c = repo[r]
1216 files.update(c.files())
1216 files.update(c.files())
1217 files.update(c.substate)
1217 files.update(c.substate)
1218 if opts[b'all_files'] or ctx.rev() is None:
1218 if opts[b'all_files'] or ctx.rev() is None:
1219 wctx = repo[None]
1219 wctx = repo[None]
1220 files.update(
1220 files.update(
1221 repo.dirstate.walk(
1221 repo.dirstate.walk(
1222 scmutil.matchall(repo),
1222 scmutil.matchall(repo),
1223 subrepos=list(wctx.substate),
1223 subrepos=list(wctx.substate),
1224 unknown=True,
1224 unknown=True,
1225 ignored=True,
1225 ignored=True,
1226 )
1226 )
1227 )
1227 )
1228 files.update(wctx.substate)
1228 files.update(wctx.substate)
1229 else:
1229 else:
1230 files.update(ctx.files())
1230 files.update(ctx.files())
1231 files.update(ctx.substate)
1231 files.update(ctx.substate)
1232
1232
1233 m = ctx.matchfileset(repo.getcwd(), expr)
1233 m = ctx.matchfileset(repo.getcwd(), expr)
1234 if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
1234 if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
1235 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
1235 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
1236 for f in sorted(files):
1236 for f in sorted(files):
1237 if not m(f):
1237 if not m(f):
1238 continue
1238 continue
1239 ui.write(b"%s\n" % f)
1239 ui.write(b"%s\n" % f)
1240
1240
1241
1241
1242 @command(b'debugformat', [] + cmdutil.formatteropts)
1242 @command(b'debugformat', [] + cmdutil.formatteropts)
1243 def debugformat(ui, repo, **opts):
1243 def debugformat(ui, repo, **opts):
1244 """display format information about the current repository
1244 """display format information about the current repository
1245
1245
1246 Use --verbose to get extra information about current config value and
1246 Use --verbose to get extra information about current config value and
1247 Mercurial default."""
1247 Mercurial default."""
1248 opts = pycompat.byteskwargs(opts)
1248 opts = pycompat.byteskwargs(opts)
1249 maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
1249 maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
1250 maxvariantlength = max(len(b'format-variant'), maxvariantlength)
1250 maxvariantlength = max(len(b'format-variant'), maxvariantlength)
1251
1251
1252 def makeformatname(name):
1252 def makeformatname(name):
1253 return b'%s:' + (b' ' * (maxvariantlength - len(name)))
1253 return b'%s:' + (b' ' * (maxvariantlength - len(name)))
1254
1254
1255 fm = ui.formatter(b'debugformat', opts)
1255 fm = ui.formatter(b'debugformat', opts)
1256 if fm.isplain():
1256 if fm.isplain():
1257
1257
1258 def formatvalue(value):
1258 def formatvalue(value):
1259 if util.safehasattr(value, b'startswith'):
1259 if util.safehasattr(value, b'startswith'):
1260 return value
1260 return value
1261 if value:
1261 if value:
1262 return b'yes'
1262 return b'yes'
1263 else:
1263 else:
1264 return b'no'
1264 return b'no'
1265
1265
1266 else:
1266 else:
1267 formatvalue = pycompat.identity
1267 formatvalue = pycompat.identity
1268
1268
1269 fm.plain(b'format-variant')
1269 fm.plain(b'format-variant')
1270 fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
1270 fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
1271 fm.plain(b' repo')
1271 fm.plain(b' repo')
1272 if ui.verbose:
1272 if ui.verbose:
1273 fm.plain(b' config default')
1273 fm.plain(b' config default')
1274 fm.plain(b'\n')
1274 fm.plain(b'\n')
1275 for fv in upgrade.allformatvariant:
1275 for fv in upgrade.allformatvariant:
1276 fm.startitem()
1276 fm.startitem()
1277 repovalue = fv.fromrepo(repo)
1277 repovalue = fv.fromrepo(repo)
1278 configvalue = fv.fromconfig(repo)
1278 configvalue = fv.fromconfig(repo)
1279
1279
1280 if repovalue != configvalue:
1280 if repovalue != configvalue:
1281 namelabel = b'formatvariant.name.mismatchconfig'
1281 namelabel = b'formatvariant.name.mismatchconfig'
1282 repolabel = b'formatvariant.repo.mismatchconfig'
1282 repolabel = b'formatvariant.repo.mismatchconfig'
1283 elif repovalue != fv.default:
1283 elif repovalue != fv.default:
1284 namelabel = b'formatvariant.name.mismatchdefault'
1284 namelabel = b'formatvariant.name.mismatchdefault'
1285 repolabel = b'formatvariant.repo.mismatchdefault'
1285 repolabel = b'formatvariant.repo.mismatchdefault'
1286 else:
1286 else:
1287 namelabel = b'formatvariant.name.uptodate'
1287 namelabel = b'formatvariant.name.uptodate'
1288 repolabel = b'formatvariant.repo.uptodate'
1288 repolabel = b'formatvariant.repo.uptodate'
1289
1289
1290 fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
1290 fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
1291 fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
1291 fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
1292 if fv.default != configvalue:
1292 if fv.default != configvalue:
1293 configlabel = b'formatvariant.config.special'
1293 configlabel = b'formatvariant.config.special'
1294 else:
1294 else:
1295 configlabel = b'formatvariant.config.default'
1295 configlabel = b'formatvariant.config.default'
1296 fm.condwrite(
1296 fm.condwrite(
1297 ui.verbose,
1297 ui.verbose,
1298 b'config',
1298 b'config',
1299 b' %6s',
1299 b' %6s',
1300 formatvalue(configvalue),
1300 formatvalue(configvalue),
1301 label=configlabel,
1301 label=configlabel,
1302 )
1302 )
1303 fm.condwrite(
1303 fm.condwrite(
1304 ui.verbose,
1304 ui.verbose,
1305 b'default',
1305 b'default',
1306 b' %7s',
1306 b' %7s',
1307 formatvalue(fv.default),
1307 formatvalue(fv.default),
1308 label=b'formatvariant.default',
1308 label=b'formatvariant.default',
1309 )
1309 )
1310 fm.plain(b'\n')
1310 fm.plain(b'\n')
1311 fm.end()
1311 fm.end()
1312
1312
1313
1313
1314 @command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
1314 @command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
1315 def debugfsinfo(ui, path=b"."):
1315 def debugfsinfo(ui, path=b"."):
1316 """show information detected about current filesystem"""
1316 """show information detected about current filesystem"""
1317 ui.writenoi18n(b'path: %s\n' % path)
1317 ui.writenoi18n(b'path: %s\n' % path)
1318 ui.writenoi18n(
1318 ui.writenoi18n(
1319 b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
1319 b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
1320 )
1320 )
1321 ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
1321 ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
1322 ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
1322 ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
1323 ui.writenoi18n(
1323 ui.writenoi18n(
1324 b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
1324 b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
1325 )
1325 )
1326 ui.writenoi18n(
1326 ui.writenoi18n(
1327 b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
1327 b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
1328 )
1328 )
1329 casesensitive = b'(unknown)'
1329 casesensitive = b'(unknown)'
1330 try:
1330 try:
1331 with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
1331 with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
1332 casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
1332 casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
1333 except OSError:
1333 except OSError:
1334 pass
1334 pass
1335 ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1335 ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1336
1336
1337
1337
1338 @command(
1338 @command(
1339 b'debuggetbundle',
1339 b'debuggetbundle',
1340 [
1340 [
1341 (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
1341 (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
1342 (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
1342 (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
1343 (
1343 (
1344 b't',
1344 b't',
1345 b'type',
1345 b'type',
1346 b'bzip2',
1346 b'bzip2',
1347 _(b'bundle compression type to use'),
1347 _(b'bundle compression type to use'),
1348 _(b'TYPE'),
1348 _(b'TYPE'),
1349 ),
1349 ),
1350 ],
1350 ],
1351 _(b'REPO FILE [-H|-C ID]...'),
1351 _(b'REPO FILE [-H|-C ID]...'),
1352 norepo=True,
1352 norepo=True,
1353 )
1353 )
1354 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
1354 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
1355 """retrieves a bundle from a repo
1355 """retrieves a bundle from a repo
1356
1356
1357 Every ID must be a full-length hex node id string. Saves the bundle to the
1357 Every ID must be a full-length hex node id string. Saves the bundle to the
1358 given file.
1358 given file.
1359 """
1359 """
1360 opts = pycompat.byteskwargs(opts)
1360 opts = pycompat.byteskwargs(opts)
1361 repo = hg.peer(ui, opts, repopath)
1361 repo = hg.peer(ui, opts, repopath)
1362 if not repo.capable(b'getbundle'):
1362 if not repo.capable(b'getbundle'):
1363 raise error.Abort(b"getbundle() not supported by target repository")
1363 raise error.Abort(b"getbundle() not supported by target repository")
1364 args = {}
1364 args = {}
1365 if common:
1365 if common:
1366 args['common'] = [bin(s) for s in common]
1366 args['common'] = [bin(s) for s in common]
1367 if head:
1367 if head:
1368 args['heads'] = [bin(s) for s in head]
1368 args['heads'] = [bin(s) for s in head]
1369 # TODO: get desired bundlecaps from command line.
1369 # TODO: get desired bundlecaps from command line.
1370 args['bundlecaps'] = None
1370 args['bundlecaps'] = None
1371 bundle = repo.getbundle(b'debug', **args)
1371 bundle = repo.getbundle(b'debug', **args)
1372
1372
1373 bundletype = opts.get(b'type', b'bzip2').lower()
1373 bundletype = opts.get(b'type', b'bzip2').lower()
1374 btypes = {
1374 btypes = {
1375 b'none': b'HG10UN',
1375 b'none': b'HG10UN',
1376 b'bzip2': b'HG10BZ',
1376 b'bzip2': b'HG10BZ',
1377 b'gzip': b'HG10GZ',
1377 b'gzip': b'HG10GZ',
1378 b'bundle2': b'HG20',
1378 b'bundle2': b'HG20',
1379 }
1379 }
1380 bundletype = btypes.get(bundletype)
1380 bundletype = btypes.get(bundletype)
1381 if bundletype not in bundle2.bundletypes:
1381 if bundletype not in bundle2.bundletypes:
1382 raise error.Abort(_(b'unknown bundle type specified with --type'))
1382 raise error.Abort(_(b'unknown bundle type specified with --type'))
1383 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1383 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1384
1384
1385
1385
1386 @command(b'debugignore', [], b'[FILE]')
1386 @command(b'debugignore', [], b'[FILE]')
1387 def debugignore(ui, repo, *files, **opts):
1387 def debugignore(ui, repo, *files, **opts):
1388 """display the combined ignore pattern and information about ignored files
1388 """display the combined ignore pattern and information about ignored files
1389
1389
1390 With no argument display the combined ignore pattern.
1390 With no argument display the combined ignore pattern.
1391
1391
1392 Given space separated file names, shows if the given file is ignored and
1392 Given space separated file names, shows if the given file is ignored and
1393 if so, show the ignore rule (file and line number) that matched it.
1393 if so, show the ignore rule (file and line number) that matched it.
1394 """
1394 """
1395 ignore = repo.dirstate._ignore
1395 ignore = repo.dirstate._ignore
1396 if not files:
1396 if not files:
1397 # Show all the patterns
1397 # Show all the patterns
1398 ui.write(b"%s\n" % pycompat.byterepr(ignore))
1398 ui.write(b"%s\n" % pycompat.byterepr(ignore))
1399 else:
1399 else:
1400 m = scmutil.match(repo[None], pats=files)
1400 m = scmutil.match(repo[None], pats=files)
1401 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1401 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1402 for f in m.files():
1402 for f in m.files():
1403 nf = util.normpath(f)
1403 nf = util.normpath(f)
1404 ignored = None
1404 ignored = None
1405 ignoredata = None
1405 ignoredata = None
1406 if nf != b'.':
1406 if nf != b'.':
1407 if ignore(nf):
1407 if ignore(nf):
1408 ignored = nf
1408 ignored = nf
1409 ignoredata = repo.dirstate._ignorefileandline(nf)
1409 ignoredata = repo.dirstate._ignorefileandline(nf)
1410 else:
1410 else:
1411 for p in pathutil.finddirs(nf):
1411 for p in pathutil.finddirs(nf):
1412 if ignore(p):
1412 if ignore(p):
1413 ignored = p
1413 ignored = p
1414 ignoredata = repo.dirstate._ignorefileandline(p)
1414 ignoredata = repo.dirstate._ignorefileandline(p)
1415 break
1415 break
1416 if ignored:
1416 if ignored:
1417 if ignored == nf:
1417 if ignored == nf:
1418 ui.write(_(b"%s is ignored\n") % uipathfn(f))
1418 ui.write(_(b"%s is ignored\n") % uipathfn(f))
1419 else:
1419 else:
1420 ui.write(
1420 ui.write(
1421 _(
1421 _(
1422 b"%s is ignored because of "
1422 b"%s is ignored because of "
1423 b"containing directory %s\n"
1423 b"containing directory %s\n"
1424 )
1424 )
1425 % (uipathfn(f), ignored)
1425 % (uipathfn(f), ignored)
1426 )
1426 )
1427 ignorefile, lineno, line = ignoredata
1427 ignorefile, lineno, line = ignoredata
1428 ui.write(
1428 ui.write(
1429 _(b"(ignore rule in %s, line %d: '%s')\n")
1429 _(b"(ignore rule in %s, line %d: '%s')\n")
1430 % (ignorefile, lineno, line)
1430 % (ignorefile, lineno, line)
1431 )
1431 )
1432 else:
1432 else:
1433 ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1433 ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1434
1434
1435
1435
1436 @command(
1436 @command(
1437 b'debugindex',
1437 b'debugindex',
1438 cmdutil.debugrevlogopts + cmdutil.formatteropts,
1438 cmdutil.debugrevlogopts + cmdutil.formatteropts,
1439 _(b'-c|-m|FILE'),
1439 _(b'-c|-m|FILE'),
1440 )
1440 )
1441 def debugindex(ui, repo, file_=None, **opts):
1441 def debugindex(ui, repo, file_=None, **opts):
1442 """dump index data for a storage primitive"""
1442 """dump index data for a storage primitive"""
1443 opts = pycompat.byteskwargs(opts)
1443 opts = pycompat.byteskwargs(opts)
1444 store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
1444 store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
1445
1445
1446 if ui.debugflag:
1446 if ui.debugflag:
1447 shortfn = hex
1447 shortfn = hex
1448 else:
1448 else:
1449 shortfn = short
1449 shortfn = short
1450
1450
1451 idlen = 12
1451 idlen = 12
1452 for i in store:
1452 for i in store:
1453 idlen = len(shortfn(store.node(i)))
1453 idlen = len(shortfn(store.node(i)))
1454 break
1454 break
1455
1455
1456 fm = ui.formatter(b'debugindex', opts)
1456 fm = ui.formatter(b'debugindex', opts)
1457 fm.plain(
1457 fm.plain(
1458 b' rev linkrev %s %s p2\n'
1458 b' rev linkrev %s %s p2\n'
1459 % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
1459 % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
1460 )
1460 )
1461
1461
1462 for rev in store:
1462 for rev in store:
1463 node = store.node(rev)
1463 node = store.node(rev)
1464 parents = store.parents(node)
1464 parents = store.parents(node)
1465
1465
1466 fm.startitem()
1466 fm.startitem()
1467 fm.write(b'rev', b'%6d ', rev)
1467 fm.write(b'rev', b'%6d ', rev)
1468 fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
1468 fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
1469 fm.write(b'node', b'%s ', shortfn(node))
1469 fm.write(b'node', b'%s ', shortfn(node))
1470 fm.write(b'p1', b'%s ', shortfn(parents[0]))
1470 fm.write(b'p1', b'%s ', shortfn(parents[0]))
1471 fm.write(b'p2', b'%s', shortfn(parents[1]))
1471 fm.write(b'p2', b'%s', shortfn(parents[1]))
1472 fm.plain(b'\n')
1472 fm.plain(b'\n')
1473
1473
1474 fm.end()
1474 fm.end()
1475
1475
1476
1476
1477 @command(
1477 @command(
1478 b'debugindexdot',
1478 b'debugindexdot',
1479 cmdutil.debugrevlogopts,
1479 cmdutil.debugrevlogopts,
1480 _(b'-c|-m|FILE'),
1480 _(b'-c|-m|FILE'),
1481 optionalrepo=True,
1481 optionalrepo=True,
1482 )
1482 )
1483 def debugindexdot(ui, repo, file_=None, **opts):
1483 def debugindexdot(ui, repo, file_=None, **opts):
1484 """dump an index DAG as a graphviz dot file"""
1484 """dump an index DAG as a graphviz dot file"""
1485 opts = pycompat.byteskwargs(opts)
1485 opts = pycompat.byteskwargs(opts)
1486 r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
1486 r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
1487 ui.writenoi18n(b"digraph G {\n")
1487 ui.writenoi18n(b"digraph G {\n")
1488 for i in r:
1488 for i in r:
1489 node = r.node(i)
1489 node = r.node(i)
1490 pp = r.parents(node)
1490 pp = r.parents(node)
1491 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1491 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1492 if pp[1] != nullid:
1492 if pp[1] != nullid:
1493 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1493 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1494 ui.write(b"}\n")
1494 ui.write(b"}\n")
1495
1495
1496
1496
1497 @command(b'debugindexstats', [])
1497 @command(b'debugindexstats', [])
1498 def debugindexstats(ui, repo):
1498 def debugindexstats(ui, repo):
1499 """show stats related to the changelog index"""
1499 """show stats related to the changelog index"""
1500 repo.changelog.shortest(nullid, 1)
1500 repo.changelog.shortest(nullid, 1)
1501 index = repo.changelog.index
1501 index = repo.changelog.index
1502 if not util.safehasattr(index, b'stats'):
1502 if not util.safehasattr(index, b'stats'):
1503 raise error.Abort(_(b'debugindexstats only works with native code'))
1503 raise error.Abort(_(b'debugindexstats only works with native code'))
1504 for k, v in sorted(index.stats().items()):
1504 for k, v in sorted(index.stats().items()):
1505 ui.write(b'%s: %d\n' % (k, v))
1505 ui.write(b'%s: %d\n' % (k, v))
1506
1506
1507
1507
1508 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1508 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1509 def debuginstall(ui, **opts):
1509 def debuginstall(ui, **opts):
1510 '''test Mercurial installation
1510 '''test Mercurial installation
1511
1511
1512 Returns 0 on success.
1512 Returns 0 on success.
1513 '''
1513 '''
1514 opts = pycompat.byteskwargs(opts)
1514 opts = pycompat.byteskwargs(opts)
1515
1515
1516 problems = 0
1516 problems = 0
1517
1517
1518 fm = ui.formatter(b'debuginstall', opts)
1518 fm = ui.formatter(b'debuginstall', opts)
1519 fm.startitem()
1519 fm.startitem()
1520
1520
1521 # encoding might be unknown or wrong. don't translate these messages.
1521 # encoding might be unknown or wrong. don't translate these messages.
1522 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1522 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1523 err = None
1523 err = None
1524 try:
1524 try:
1525 codecs.lookup(pycompat.sysstr(encoding.encoding))
1525 codecs.lookup(pycompat.sysstr(encoding.encoding))
1526 except LookupError as inst:
1526 except LookupError as inst:
1527 err = stringutil.forcebytestr(inst)
1527 err = stringutil.forcebytestr(inst)
1528 problems += 1
1528 problems += 1
1529 fm.condwrite(
1529 fm.condwrite(
1530 err,
1530 err,
1531 b'encodingerror',
1531 b'encodingerror',
1532 b" %s\n (check that your locale is properly set)\n",
1532 b" %s\n (check that your locale is properly set)\n",
1533 err,
1533 err,
1534 )
1534 )
1535
1535
1536 # Python
1536 # Python
1537 pythonlib = None
1537 pythonlib = None
1538 if util.safehasattr(os, '__file__'):
1538 if util.safehasattr(os, '__file__'):
1539 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1539 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1540 elif getattr(sys, 'oxidized', False):
1540 elif getattr(sys, 'oxidized', False):
1541 pythonlib = pycompat.sysexecutable
1541 pythonlib = pycompat.sysexecutable
1542
1542
1543 fm.write(
1543 fm.write(
1544 b'pythonexe',
1544 b'pythonexe',
1545 _(b"checking Python executable (%s)\n"),
1545 _(b"checking Python executable (%s)\n"),
1546 pycompat.sysexecutable or _(b"unknown"),
1546 pycompat.sysexecutable or _(b"unknown"),
1547 )
1547 )
1548 fm.write(
1548 fm.write(
1549 b'pythonimplementation',
1549 b'pythonimplementation',
1550 _(b"checking Python implementation (%s)\n"),
1550 _(b"checking Python implementation (%s)\n"),
1551 pycompat.sysbytes(platform.python_implementation()),
1551 pycompat.sysbytes(platform.python_implementation()),
1552 )
1552 )
1553 fm.write(
1553 fm.write(
1554 b'pythonver',
1554 b'pythonver',
1555 _(b"checking Python version (%s)\n"),
1555 _(b"checking Python version (%s)\n"),
1556 (b"%d.%d.%d" % sys.version_info[:3]),
1556 (b"%d.%d.%d" % sys.version_info[:3]),
1557 )
1557 )
1558 fm.write(
1558 fm.write(
1559 b'pythonlib',
1559 b'pythonlib',
1560 _(b"checking Python lib (%s)...\n"),
1560 _(b"checking Python lib (%s)...\n"),
1561 pythonlib or _(b"unknown"),
1561 pythonlib or _(b"unknown"),
1562 )
1562 )
1563
1563
1564 try:
1564 try:
1565 from . import rustext
1565 from . import rustext
1566
1566
1567 rustext.__doc__ # trigger lazy import
1567 rustext.__doc__ # trigger lazy import
1568 except ImportError:
1568 except ImportError:
1569 rustext = None
1569 rustext = None
1570
1570
1571 security = set(sslutil.supportedprotocols)
1571 security = set(sslutil.supportedprotocols)
1572 if sslutil.hassni:
1572 if sslutil.hassni:
1573 security.add(b'sni')
1573 security.add(b'sni')
1574
1574
1575 fm.write(
1575 fm.write(
1576 b'pythonsecurity',
1576 b'pythonsecurity',
1577 _(b"checking Python security support (%s)\n"),
1577 _(b"checking Python security support (%s)\n"),
1578 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1578 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1579 )
1579 )
1580
1580
1581 # These are warnings, not errors. So don't increment problem count. This
1581 # These are warnings, not errors. So don't increment problem count. This
1582 # may change in the future.
1582 # may change in the future.
1583 if b'tls1.2' not in security:
1583 if b'tls1.2' not in security:
1584 fm.plain(
1584 fm.plain(
1585 _(
1585 _(
1586 b' TLS 1.2 not supported by Python install; '
1586 b' TLS 1.2 not supported by Python install; '
1587 b'network connections lack modern security\n'
1587 b'network connections lack modern security\n'
1588 )
1588 )
1589 )
1589 )
1590 if b'sni' not in security:
1590 if b'sni' not in security:
1591 fm.plain(
1591 fm.plain(
1592 _(
1592 _(
1593 b' SNI not supported by Python install; may have '
1593 b' SNI not supported by Python install; may have '
1594 b'connectivity issues with some servers\n'
1594 b'connectivity issues with some servers\n'
1595 )
1595 )
1596 )
1596 )
1597
1597
1598 fm.plain(
1598 fm.plain(
1599 _(
1599 _(
1600 b"checking Rust extensions (%s)\n"
1600 b"checking Rust extensions (%s)\n"
1601 % (b'missing' if rustext is None else b'installed')
1601 % (b'missing' if rustext is None else b'installed')
1602 ),
1602 ),
1603 )
1603 )
1604
1604
1605 # TODO print CA cert info
1605 # TODO print CA cert info
1606
1606
1607 # hg version
1607 # hg version
1608 hgver = util.version()
1608 hgver = util.version()
1609 fm.write(
1609 fm.write(
1610 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1610 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1611 )
1611 )
1612 fm.write(
1612 fm.write(
1613 b'hgverextra',
1613 b'hgverextra',
1614 _(b"checking Mercurial custom build (%s)\n"),
1614 _(b"checking Mercurial custom build (%s)\n"),
1615 b'+'.join(hgver.split(b'+')[1:]),
1615 b'+'.join(hgver.split(b'+')[1:]),
1616 )
1616 )
1617
1617
1618 # compiled modules
1618 # compiled modules
1619 hgmodules = None
1619 hgmodules = None
1620 if util.safehasattr(sys.modules[__name__], '__file__'):
1620 if util.safehasattr(sys.modules[__name__], '__file__'):
1621 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1621 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1622 elif getattr(sys, 'oxidized', False):
1622 elif getattr(sys, 'oxidized', False):
1623 hgmodules = pycompat.sysexecutable
1623 hgmodules = pycompat.sysexecutable
1624
1624
1625 fm.write(
1625 fm.write(
1626 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1626 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1627 )
1627 )
1628 fm.write(
1628 fm.write(
1629 b'hgmodules',
1629 b'hgmodules',
1630 _(b"checking installed modules (%s)...\n"),
1630 _(b"checking installed modules (%s)...\n"),
1631 hgmodules or _(b"unknown"),
1631 hgmodules or _(b"unknown"),
1632 )
1632 )
1633
1633
1634 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1634 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1635 rustext = rustandc # for now, that's the only case
1635 rustext = rustandc # for now, that's the only case
1636 cext = policy.policy in (b'c', b'allow') or rustandc
1636 cext = policy.policy in (b'c', b'allow') or rustandc
1637 nopure = cext or rustext
1637 nopure = cext or rustext
1638 if nopure:
1638 if nopure:
1639 err = None
1639 err = None
1640 try:
1640 try:
1641 if cext:
1641 if cext:
1642 from .cext import ( # pytype: disable=import-error
1642 from .cext import ( # pytype: disable=import-error
1643 base85,
1643 base85,
1644 bdiff,
1644 bdiff,
1645 mpatch,
1645 mpatch,
1646 osutil,
1646 osutil,
1647 )
1647 )
1648
1648
1649 # quiet pyflakes
1649 # quiet pyflakes
1650 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1650 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1651 if rustext:
1651 if rustext:
1652 from .rustext import ( # pytype: disable=import-error
1652 from .rustext import ( # pytype: disable=import-error
1653 ancestor,
1653 ancestor,
1654 dirstate,
1654 dirstate,
1655 )
1655 )
1656
1656
1657 dir(ancestor), dir(dirstate) # quiet pyflakes
1657 dir(ancestor), dir(dirstate) # quiet pyflakes
1658 except Exception as inst:
1658 except Exception as inst:
1659 err = stringutil.forcebytestr(inst)
1659 err = stringutil.forcebytestr(inst)
1660 problems += 1
1660 problems += 1
1661 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1661 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1662
1662
1663 compengines = util.compengines._engines.values()
1663 compengines = util.compengines._engines.values()
1664 fm.write(
1664 fm.write(
1665 b'compengines',
1665 b'compengines',
1666 _(b'checking registered compression engines (%s)\n'),
1666 _(b'checking registered compression engines (%s)\n'),
1667 fm.formatlist(
1667 fm.formatlist(
1668 sorted(e.name() for e in compengines),
1668 sorted(e.name() for e in compengines),
1669 name=b'compengine',
1669 name=b'compengine',
1670 fmt=b'%s',
1670 fmt=b'%s',
1671 sep=b', ',
1671 sep=b', ',
1672 ),
1672 ),
1673 )
1673 )
1674 fm.write(
1674 fm.write(
1675 b'compenginesavail',
1675 b'compenginesavail',
1676 _(b'checking available compression engines (%s)\n'),
1676 _(b'checking available compression engines (%s)\n'),
1677 fm.formatlist(
1677 fm.formatlist(
1678 sorted(e.name() for e in compengines if e.available()),
1678 sorted(e.name() for e in compengines if e.available()),
1679 name=b'compengine',
1679 name=b'compengine',
1680 fmt=b'%s',
1680 fmt=b'%s',
1681 sep=b', ',
1681 sep=b', ',
1682 ),
1682 ),
1683 )
1683 )
1684 wirecompengines = compression.compengines.supportedwireengines(
1684 wirecompengines = compression.compengines.supportedwireengines(
1685 compression.SERVERROLE
1685 compression.SERVERROLE
1686 )
1686 )
1687 fm.write(
1687 fm.write(
1688 b'compenginesserver',
1688 b'compenginesserver',
1689 _(
1689 _(
1690 b'checking available compression engines '
1690 b'checking available compression engines '
1691 b'for wire protocol (%s)\n'
1691 b'for wire protocol (%s)\n'
1692 ),
1692 ),
1693 fm.formatlist(
1693 fm.formatlist(
1694 [e.name() for e in wirecompengines if e.wireprotosupport()],
1694 [e.name() for e in wirecompengines if e.wireprotosupport()],
1695 name=b'compengine',
1695 name=b'compengine',
1696 fmt=b'%s',
1696 fmt=b'%s',
1697 sep=b', ',
1697 sep=b', ',
1698 ),
1698 ),
1699 )
1699 )
1700 re2 = b'missing'
1700 re2 = b'missing'
1701 if util._re2:
1701 if util._re2:
1702 re2 = b'available'
1702 re2 = b'available'
1703 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
1703 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
1704 fm.data(re2=bool(util._re2))
1704 fm.data(re2=bool(util._re2))
1705
1705
1706 # templates
1706 # templates
1707 p = templater.templatedir()
1707 p = templater.templatedir()
1708 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
1708 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
1709 fm.condwrite(not p, b'', _(b" no template directories found\n"))
1709 fm.condwrite(not p, b'', _(b" no template directories found\n"))
1710 if p:
1710 if p:
1711 (m, fp) = templater.try_open_template(b"map-cmdline.default")
1711 (m, fp) = templater.try_open_template(b"map-cmdline.default")
1712 if m:
1712 if m:
1713 # template found, check if it is working
1713 # template found, check if it is working
1714 err = None
1714 err = None
1715 try:
1715 try:
1716 templater.templater.frommapfile(m)
1716 templater.templater.frommapfile(m)
1717 except Exception as inst:
1717 except Exception as inst:
1718 err = stringutil.forcebytestr(inst)
1718 err = stringutil.forcebytestr(inst)
1719 p = None
1719 p = None
1720 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
1720 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
1721 else:
1721 else:
1722 p = None
1722 p = None
1723 fm.condwrite(
1723 fm.condwrite(
1724 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
1724 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
1725 )
1725 )
1726 fm.condwrite(
1726 fm.condwrite(
1727 not m,
1727 not m,
1728 b'defaulttemplatenotfound',
1728 b'defaulttemplatenotfound',
1729 _(b" template '%s' not found\n"),
1729 _(b" template '%s' not found\n"),
1730 b"default",
1730 b"default",
1731 )
1731 )
1732 if not p:
1732 if not p:
1733 problems += 1
1733 problems += 1
1734 fm.condwrite(
1734 fm.condwrite(
1735 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
1735 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
1736 )
1736 )
1737
1737
1738 # editor
1738 # editor
1739 editor = ui.geteditor()
1739 editor = ui.geteditor()
1740 editor = util.expandpath(editor)
1740 editor = util.expandpath(editor)
1741 editorbin = procutil.shellsplit(editor)[0]
1741 editorbin = procutil.shellsplit(editor)[0]
1742 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
1742 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
1743 cmdpath = procutil.findexe(editorbin)
1743 cmdpath = procutil.findexe(editorbin)
1744 fm.condwrite(
1744 fm.condwrite(
1745 not cmdpath and editor == b'vi',
1745 not cmdpath and editor == b'vi',
1746 b'vinotfound',
1746 b'vinotfound',
1747 _(
1747 _(
1748 b" No commit editor set and can't find %s in PATH\n"
1748 b" No commit editor set and can't find %s in PATH\n"
1749 b" (specify a commit editor in your configuration"
1749 b" (specify a commit editor in your configuration"
1750 b" file)\n"
1750 b" file)\n"
1751 ),
1751 ),
1752 not cmdpath and editor == b'vi' and editorbin,
1752 not cmdpath and editor == b'vi' and editorbin,
1753 )
1753 )
1754 fm.condwrite(
1754 fm.condwrite(
1755 not cmdpath and editor != b'vi',
1755 not cmdpath and editor != b'vi',
1756 b'editornotfound',
1756 b'editornotfound',
1757 _(
1757 _(
1758 b" Can't find editor '%s' in PATH\n"
1758 b" Can't find editor '%s' in PATH\n"
1759 b" (specify a commit editor in your configuration"
1759 b" (specify a commit editor in your configuration"
1760 b" file)\n"
1760 b" file)\n"
1761 ),
1761 ),
1762 not cmdpath and editorbin,
1762 not cmdpath and editorbin,
1763 )
1763 )
1764 if not cmdpath and editor != b'vi':
1764 if not cmdpath and editor != b'vi':
1765 problems += 1
1765 problems += 1
1766
1766
1767 # check username
1767 # check username
1768 username = None
1768 username = None
1769 err = None
1769 err = None
1770 try:
1770 try:
1771 username = ui.username()
1771 username = ui.username()
1772 except error.Abort as e:
1772 except error.Abort as e:
1773 err = stringutil.forcebytestr(e)
1773 err = e.message
1774 problems += 1
1774 problems += 1
1775
1775
1776 fm.condwrite(
1776 fm.condwrite(
1777 username, b'username', _(b"checking username (%s)\n"), username
1777 username, b'username', _(b"checking username (%s)\n"), username
1778 )
1778 )
1779 fm.condwrite(
1779 fm.condwrite(
1780 err,
1780 err,
1781 b'usernameerror',
1781 b'usernameerror',
1782 _(
1782 _(
1783 b"checking username...\n %s\n"
1783 b"checking username...\n %s\n"
1784 b" (specify a username in your configuration file)\n"
1784 b" (specify a username in your configuration file)\n"
1785 ),
1785 ),
1786 err,
1786 err,
1787 )
1787 )
1788
1788
1789 for name, mod in extensions.extensions():
1789 for name, mod in extensions.extensions():
1790 handler = getattr(mod, 'debuginstall', None)
1790 handler = getattr(mod, 'debuginstall', None)
1791 if handler is not None:
1791 if handler is not None:
1792 problems += handler(ui, fm)
1792 problems += handler(ui, fm)
1793
1793
1794 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
1794 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
1795 if not problems:
1795 if not problems:
1796 fm.data(problems=problems)
1796 fm.data(problems=problems)
1797 fm.condwrite(
1797 fm.condwrite(
1798 problems,
1798 problems,
1799 b'problems',
1799 b'problems',
1800 _(b"%d problems detected, please check your install!\n"),
1800 _(b"%d problems detected, please check your install!\n"),
1801 problems,
1801 problems,
1802 )
1802 )
1803 fm.end()
1803 fm.end()
1804
1804
1805 return problems
1805 return problems
1806
1806
1807
1807
1808 @command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
1808 @command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
1809 def debugknown(ui, repopath, *ids, **opts):
1809 def debugknown(ui, repopath, *ids, **opts):
1810 """test whether node ids are known to a repo
1810 """test whether node ids are known to a repo
1811
1811
1812 Every ID must be a full-length hex node id string. Returns a list of 0s
1812 Every ID must be a full-length hex node id string. Returns a list of 0s
1813 and 1s indicating unknown/known.
1813 and 1s indicating unknown/known.
1814 """
1814 """
1815 opts = pycompat.byteskwargs(opts)
1815 opts = pycompat.byteskwargs(opts)
1816 repo = hg.peer(ui, opts, repopath)
1816 repo = hg.peer(ui, opts, repopath)
1817 if not repo.capable(b'known'):
1817 if not repo.capable(b'known'):
1818 raise error.Abort(b"known() not supported by target repository")
1818 raise error.Abort(b"known() not supported by target repository")
1819 flags = repo.known([bin(s) for s in ids])
1819 flags = repo.known([bin(s) for s in ids])
1820 ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))
1820 ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))
1821
1821
1822
1822
1823 @command(b'debuglabelcomplete', [], _(b'LABEL...'))
1823 @command(b'debuglabelcomplete', [], _(b'LABEL...'))
1824 def debuglabelcomplete(ui, repo, *args):
1824 def debuglabelcomplete(ui, repo, *args):
1825 '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
1825 '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
1826 debugnamecomplete(ui, repo, *args)
1826 debugnamecomplete(ui, repo, *args)
1827
1827
1828
1828
1829 @command(
1829 @command(
1830 b'debuglocks',
1830 b'debuglocks',
1831 [
1831 [
1832 (b'L', b'force-lock', None, _(b'free the store lock (DANGEROUS)')),
1832 (b'L', b'force-lock', None, _(b'free the store lock (DANGEROUS)')),
1833 (
1833 (
1834 b'W',
1834 b'W',
1835 b'force-wlock',
1835 b'force-wlock',
1836 None,
1836 None,
1837 _(b'free the working state lock (DANGEROUS)'),
1837 _(b'free the working state lock (DANGEROUS)'),
1838 ),
1838 ),
1839 (b's', b'set-lock', None, _(b'set the store lock until stopped')),
1839 (b's', b'set-lock', None, _(b'set the store lock until stopped')),
1840 (
1840 (
1841 b'S',
1841 b'S',
1842 b'set-wlock',
1842 b'set-wlock',
1843 None,
1843 None,
1844 _(b'set the working state lock until stopped'),
1844 _(b'set the working state lock until stopped'),
1845 ),
1845 ),
1846 ],
1846 ],
1847 _(b'[OPTION]...'),
1847 _(b'[OPTION]...'),
1848 )
1848 )
1849 def debuglocks(ui, repo, **opts):
1849 def debuglocks(ui, repo, **opts):
1850 """show or modify state of locks
1850 """show or modify state of locks
1851
1851
1852 By default, this command will show which locks are held. This
1852 By default, this command will show which locks are held. This
1853 includes the user and process holding the lock, the amount of time
1853 includes the user and process holding the lock, the amount of time
1854 the lock has been held, and the machine name where the process is
1854 the lock has been held, and the machine name where the process is
1855 running if it's not local.
1855 running if it's not local.
1856
1856
1857 Locks protect the integrity of Mercurial's data, so should be
1857 Locks protect the integrity of Mercurial's data, so should be
1858 treated with care. System crashes or other interruptions may cause
1858 treated with care. System crashes or other interruptions may cause
1859 locks to not be properly released, though Mercurial will usually
1859 locks to not be properly released, though Mercurial will usually
1860 detect and remove such stale locks automatically.
1860 detect and remove such stale locks automatically.
1861
1861
1862 However, detecting stale locks may not always be possible (for
1862 However, detecting stale locks may not always be possible (for
1863 instance, on a shared filesystem). Removing locks may also be
1863 instance, on a shared filesystem). Removing locks may also be
1864 blocked by filesystem permissions.
1864 blocked by filesystem permissions.
1865
1865
1866 Setting a lock will prevent other commands from changing the data.
1866 Setting a lock will prevent other commands from changing the data.
1867 The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
1867 The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
1868 The set locks are removed when the command exits.
1868 The set locks are removed when the command exits.
1869
1869
1870 Returns 0 if no locks are held.
1870 Returns 0 if no locks are held.
1871
1871
1872 """
1872 """
1873
1873
1874 if opts.get('force_lock'):
1874 if opts.get('force_lock'):
1875 repo.svfs.unlink(b'lock')
1875 repo.svfs.unlink(b'lock')
1876 if opts.get('force_wlock'):
1876 if opts.get('force_wlock'):
1877 repo.vfs.unlink(b'wlock')
1877 repo.vfs.unlink(b'wlock')
1878 if opts.get('force_lock') or opts.get('force_wlock'):
1878 if opts.get('force_lock') or opts.get('force_wlock'):
1879 return 0
1879 return 0
1880
1880
1881 locks = []
1881 locks = []
1882 try:
1882 try:
1883 if opts.get('set_wlock'):
1883 if opts.get('set_wlock'):
1884 try:
1884 try:
1885 locks.append(repo.wlock(False))
1885 locks.append(repo.wlock(False))
1886 except error.LockHeld:
1886 except error.LockHeld:
1887 raise error.Abort(_(b'wlock is already held'))
1887 raise error.Abort(_(b'wlock is already held'))
1888 if opts.get('set_lock'):
1888 if opts.get('set_lock'):
1889 try:
1889 try:
1890 locks.append(repo.lock(False))
1890 locks.append(repo.lock(False))
1891 except error.LockHeld:
1891 except error.LockHeld:
1892 raise error.Abort(_(b'lock is already held'))
1892 raise error.Abort(_(b'lock is already held'))
1893 if len(locks):
1893 if len(locks):
1894 ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
1894 ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
1895 return 0
1895 return 0
1896 finally:
1896 finally:
1897 release(*locks)
1897 release(*locks)
1898
1898
1899 now = time.time()
1899 now = time.time()
1900 held = 0
1900 held = 0
1901
1901
1902 def report(vfs, name, method):
1902 def report(vfs, name, method):
1903 # this causes stale locks to get reaped for more accurate reporting
1903 # this causes stale locks to get reaped for more accurate reporting
1904 try:
1904 try:
1905 l = method(False)
1905 l = method(False)
1906 except error.LockHeld:
1906 except error.LockHeld:
1907 l = None
1907 l = None
1908
1908
1909 if l:
1909 if l:
1910 l.release()
1910 l.release()
1911 else:
1911 else:
1912 try:
1912 try:
1913 st = vfs.lstat(name)
1913 st = vfs.lstat(name)
1914 age = now - st[stat.ST_MTIME]
1914 age = now - st[stat.ST_MTIME]
1915 user = util.username(st.st_uid)
1915 user = util.username(st.st_uid)
1916 locker = vfs.readlock(name)
1916 locker = vfs.readlock(name)
1917 if b":" in locker:
1917 if b":" in locker:
1918 host, pid = locker.split(b':')
1918 host, pid = locker.split(b':')
1919 if host == socket.gethostname():
1919 if host == socket.gethostname():
1920 locker = b'user %s, process %s' % (user or b'None', pid)
1920 locker = b'user %s, process %s' % (user or b'None', pid)
1921 else:
1921 else:
1922 locker = b'user %s, process %s, host %s' % (
1922 locker = b'user %s, process %s, host %s' % (
1923 user or b'None',
1923 user or b'None',
1924 pid,
1924 pid,
1925 host,
1925 host,
1926 )
1926 )
1927 ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
1927 ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
1928 return 1
1928 return 1
1929 except OSError as e:
1929 except OSError as e:
1930 if e.errno != errno.ENOENT:
1930 if e.errno != errno.ENOENT:
1931 raise
1931 raise
1932
1932
1933 ui.writenoi18n(b"%-6s free\n" % (name + b":"))
1933 ui.writenoi18n(b"%-6s free\n" % (name + b":"))
1934 return 0
1934 return 0
1935
1935
1936 held += report(repo.svfs, b"lock", repo.lock)
1936 held += report(repo.svfs, b"lock", repo.lock)
1937 held += report(repo.vfs, b"wlock", repo.wlock)
1937 held += report(repo.vfs, b"wlock", repo.wlock)
1938
1938
1939 return held
1939 return held
1940
1940
1941
1941
1942 @command(
1942 @command(
1943 b'debugmanifestfulltextcache',
1943 b'debugmanifestfulltextcache',
1944 [
1944 [
1945 (b'', b'clear', False, _(b'clear the cache')),
1945 (b'', b'clear', False, _(b'clear the cache')),
1946 (
1946 (
1947 b'a',
1947 b'a',
1948 b'add',
1948 b'add',
1949 [],
1949 [],
1950 _(b'add the given manifest nodes to the cache'),
1950 _(b'add the given manifest nodes to the cache'),
1951 _(b'NODE'),
1951 _(b'NODE'),
1952 ),
1952 ),
1953 ],
1953 ],
1954 b'',
1954 b'',
1955 )
1955 )
1956 def debugmanifestfulltextcache(ui, repo, add=(), **opts):
1956 def debugmanifestfulltextcache(ui, repo, add=(), **opts):
1957 """show, clear or amend the contents of the manifest fulltext cache"""
1957 """show, clear or amend the contents of the manifest fulltext cache"""
1958
1958
1959 def getcache():
1959 def getcache():
1960 r = repo.manifestlog.getstorage(b'')
1960 r = repo.manifestlog.getstorage(b'')
1961 try:
1961 try:
1962 return r._fulltextcache
1962 return r._fulltextcache
1963 except AttributeError:
1963 except AttributeError:
1964 msg = _(
1964 msg = _(
1965 b"Current revlog implementation doesn't appear to have a "
1965 b"Current revlog implementation doesn't appear to have a "
1966 b"manifest fulltext cache\n"
1966 b"manifest fulltext cache\n"
1967 )
1967 )
1968 raise error.Abort(msg)
1968 raise error.Abort(msg)
1969
1969
1970 if opts.get('clear'):
1970 if opts.get('clear'):
1971 with repo.wlock():
1971 with repo.wlock():
1972 cache = getcache()
1972 cache = getcache()
1973 cache.clear(clear_persisted_data=True)
1973 cache.clear(clear_persisted_data=True)
1974 return
1974 return
1975
1975
1976 if add:
1976 if add:
1977 with repo.wlock():
1977 with repo.wlock():
1978 m = repo.manifestlog
1978 m = repo.manifestlog
1979 store = m.getstorage(b'')
1979 store = m.getstorage(b'')
1980 for n in add:
1980 for n in add:
1981 try:
1981 try:
1982 manifest = m[store.lookup(n)]
1982 manifest = m[store.lookup(n)]
1983 except error.LookupError as e:
1983 except error.LookupError as e:
1984 raise error.Abort(e, hint=b"Check your manifest node id")
1984 raise error.Abort(e, hint=b"Check your manifest node id")
1985 manifest.read() # stores revisision in cache too
1985 manifest.read() # stores revisision in cache too
1986 return
1986 return
1987
1987
1988 cache = getcache()
1988 cache = getcache()
1989 if not len(cache):
1989 if not len(cache):
1990 ui.write(_(b'cache empty\n'))
1990 ui.write(_(b'cache empty\n'))
1991 else:
1991 else:
1992 ui.write(
1992 ui.write(
1993 _(
1993 _(
1994 b'cache contains %d manifest entries, in order of most to '
1994 b'cache contains %d manifest entries, in order of most to '
1995 b'least recent:\n'
1995 b'least recent:\n'
1996 )
1996 )
1997 % (len(cache),)
1997 % (len(cache),)
1998 )
1998 )
1999 totalsize = 0
1999 totalsize = 0
2000 for nodeid in cache:
2000 for nodeid in cache:
2001 # Use cache.get to not update the LRU order
2001 # Use cache.get to not update the LRU order
2002 data = cache.peek(nodeid)
2002 data = cache.peek(nodeid)
2003 size = len(data)
2003 size = len(data)
2004 totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
2004 totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
2005 ui.write(
2005 ui.write(
2006 _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
2006 _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
2007 )
2007 )
2008 ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
2008 ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
2009 ui.write(
2009 ui.write(
2010 _(b'total cache data size %s, on-disk %s\n')
2010 _(b'total cache data size %s, on-disk %s\n')
2011 % (util.bytecount(totalsize), util.bytecount(ondisk))
2011 % (util.bytecount(totalsize), util.bytecount(ondisk))
2012 )
2012 )
2013
2013
2014
2014
2015 @command(b'debugmergestate', [] + cmdutil.templateopts, b'')
2015 @command(b'debugmergestate', [] + cmdutil.templateopts, b'')
2016 def debugmergestate(ui, repo, *args, **opts):
2016 def debugmergestate(ui, repo, *args, **opts):
2017 """print merge state
2017 """print merge state
2018
2018
2019 Use --verbose to print out information about whether v1 or v2 merge state
2019 Use --verbose to print out information about whether v1 or v2 merge state
2020 was chosen."""
2020 was chosen."""
2021
2021
2022 if ui.verbose:
2022 if ui.verbose:
2023 ms = mergestatemod.mergestate(repo)
2023 ms = mergestatemod.mergestate(repo)
2024
2024
2025 # sort so that reasonable information is on top
2025 # sort so that reasonable information is on top
2026 v1records = ms._readrecordsv1()
2026 v1records = ms._readrecordsv1()
2027 v2records = ms._readrecordsv2()
2027 v2records = ms._readrecordsv2()
2028
2028
2029 if not v1records and not v2records:
2029 if not v1records and not v2records:
2030 pass
2030 pass
2031 elif not v2records:
2031 elif not v2records:
2032 ui.writenoi18n(b'no version 2 merge state\n')
2032 ui.writenoi18n(b'no version 2 merge state\n')
2033 elif ms._v1v2match(v1records, v2records):
2033 elif ms._v1v2match(v1records, v2records):
2034 ui.writenoi18n(b'v1 and v2 states match: using v2\n')
2034 ui.writenoi18n(b'v1 and v2 states match: using v2\n')
2035 else:
2035 else:
2036 ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')
2036 ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')
2037
2037
2038 opts = pycompat.byteskwargs(opts)
2038 opts = pycompat.byteskwargs(opts)
2039 if not opts[b'template']:
2039 if not opts[b'template']:
2040 opts[b'template'] = (
2040 opts[b'template'] = (
2041 b'{if(commits, "", "no merge state found\n")}'
2041 b'{if(commits, "", "no merge state found\n")}'
2042 b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
2042 b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
2043 b'{files % "file: {path} (state \\"{state}\\")\n'
2043 b'{files % "file: {path} (state \\"{state}\\")\n'
2044 b'{if(local_path, "'
2044 b'{if(local_path, "'
2045 b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
2045 b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
2046 b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
2046 b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
2047 b' other path: {other_path} (node {other_node})\n'
2047 b' other path: {other_path} (node {other_node})\n'
2048 b'")}'
2048 b'")}'
2049 b'{if(rename_side, "'
2049 b'{if(rename_side, "'
2050 b' rename side: {rename_side}\n'
2050 b' rename side: {rename_side}\n'
2051 b' renamed path: {renamed_path}\n'
2051 b' renamed path: {renamed_path}\n'
2052 b'")}'
2052 b'")}'
2053 b'{extras % " extra: {key} = {value}\n"}'
2053 b'{extras % " extra: {key} = {value}\n"}'
2054 b'"}'
2054 b'"}'
2055 b'{extras % "extra: {file} ({key} = {value})\n"}'
2055 b'{extras % "extra: {file} ({key} = {value})\n"}'
2056 )
2056 )
2057
2057
2058 ms = mergestatemod.mergestate.read(repo)
2058 ms = mergestatemod.mergestate.read(repo)
2059
2059
2060 fm = ui.formatter(b'debugmergestate', opts)
2060 fm = ui.formatter(b'debugmergestate', opts)
2061 fm.startitem()
2061 fm.startitem()
2062
2062
2063 fm_commits = fm.nested(b'commits')
2063 fm_commits = fm.nested(b'commits')
2064 if ms.active():
2064 if ms.active():
2065 for name, node, label_index in (
2065 for name, node, label_index in (
2066 (b'local', ms.local, 0),
2066 (b'local', ms.local, 0),
2067 (b'other', ms.other, 1),
2067 (b'other', ms.other, 1),
2068 ):
2068 ):
2069 fm_commits.startitem()
2069 fm_commits.startitem()
2070 fm_commits.data(name=name)
2070 fm_commits.data(name=name)
2071 fm_commits.data(node=hex(node))
2071 fm_commits.data(node=hex(node))
2072 if ms._labels and len(ms._labels) > label_index:
2072 if ms._labels and len(ms._labels) > label_index:
2073 fm_commits.data(label=ms._labels[label_index])
2073 fm_commits.data(label=ms._labels[label_index])
2074 fm_commits.end()
2074 fm_commits.end()
2075
2075
2076 fm_files = fm.nested(b'files')
2076 fm_files = fm.nested(b'files')
2077 if ms.active():
2077 if ms.active():
2078 for f in ms:
2078 for f in ms:
2079 fm_files.startitem()
2079 fm_files.startitem()
2080 fm_files.data(path=f)
2080 fm_files.data(path=f)
2081 state = ms._state[f]
2081 state = ms._state[f]
2082 fm_files.data(state=state[0])
2082 fm_files.data(state=state[0])
2083 if state[0] in (
2083 if state[0] in (
2084 mergestatemod.MERGE_RECORD_UNRESOLVED,
2084 mergestatemod.MERGE_RECORD_UNRESOLVED,
2085 mergestatemod.MERGE_RECORD_RESOLVED,
2085 mergestatemod.MERGE_RECORD_RESOLVED,
2086 ):
2086 ):
2087 fm_files.data(local_key=state[1])
2087 fm_files.data(local_key=state[1])
2088 fm_files.data(local_path=state[2])
2088 fm_files.data(local_path=state[2])
2089 fm_files.data(ancestor_path=state[3])
2089 fm_files.data(ancestor_path=state[3])
2090 fm_files.data(ancestor_node=state[4])
2090 fm_files.data(ancestor_node=state[4])
2091 fm_files.data(other_path=state[5])
2091 fm_files.data(other_path=state[5])
2092 fm_files.data(other_node=state[6])
2092 fm_files.data(other_node=state[6])
2093 fm_files.data(local_flags=state[7])
2093 fm_files.data(local_flags=state[7])
2094 elif state[0] in (
2094 elif state[0] in (
2095 mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
2095 mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
2096 mergestatemod.MERGE_RECORD_RESOLVED_PATH,
2096 mergestatemod.MERGE_RECORD_RESOLVED_PATH,
2097 ):
2097 ):
2098 fm_files.data(renamed_path=state[1])
2098 fm_files.data(renamed_path=state[1])
2099 fm_files.data(rename_side=state[2])
2099 fm_files.data(rename_side=state[2])
2100 fm_extras = fm_files.nested(b'extras')
2100 fm_extras = fm_files.nested(b'extras')
2101 for k, v in sorted(ms.extras(f).items()):
2101 for k, v in sorted(ms.extras(f).items()):
2102 fm_extras.startitem()
2102 fm_extras.startitem()
2103 fm_extras.data(key=k)
2103 fm_extras.data(key=k)
2104 fm_extras.data(value=v)
2104 fm_extras.data(value=v)
2105 fm_extras.end()
2105 fm_extras.end()
2106
2106
2107 fm_files.end()
2107 fm_files.end()
2108
2108
2109 fm_extras = fm.nested(b'extras')
2109 fm_extras = fm.nested(b'extras')
2110 for f, d in sorted(pycompat.iteritems(ms._stateextras)):
2110 for f, d in sorted(pycompat.iteritems(ms._stateextras)):
2111 if f in ms:
2111 if f in ms:
2112 # If file is in mergestate, we have already processed it's extras
2112 # If file is in mergestate, we have already processed it's extras
2113 continue
2113 continue
2114 for k, v in pycompat.iteritems(d):
2114 for k, v in pycompat.iteritems(d):
2115 fm_extras.startitem()
2115 fm_extras.startitem()
2116 fm_extras.data(file=f)
2116 fm_extras.data(file=f)
2117 fm_extras.data(key=k)
2117 fm_extras.data(key=k)
2118 fm_extras.data(value=v)
2118 fm_extras.data(value=v)
2119 fm_extras.end()
2119 fm_extras.end()
2120
2120
2121 fm.end()
2121 fm.end()
2122
2122
2123
2123
2124 @command(b'debugnamecomplete', [], _(b'NAME...'))
2124 @command(b'debugnamecomplete', [], _(b'NAME...'))
2125 def debugnamecomplete(ui, repo, *args):
2125 def debugnamecomplete(ui, repo, *args):
2126 '''complete "names" - tags, open branch names, bookmark names'''
2126 '''complete "names" - tags, open branch names, bookmark names'''
2127
2127
2128 names = set()
2128 names = set()
2129 # since we previously only listed open branches, we will handle that
2129 # since we previously only listed open branches, we will handle that
2130 # specially (after this for loop)
2130 # specially (after this for loop)
2131 for name, ns in pycompat.iteritems(repo.names):
2131 for name, ns in pycompat.iteritems(repo.names):
2132 if name != b'branches':
2132 if name != b'branches':
2133 names.update(ns.listnames(repo))
2133 names.update(ns.listnames(repo))
2134 names.update(
2134 names.update(
2135 tag
2135 tag
2136 for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
2136 for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
2137 if not closed
2137 if not closed
2138 )
2138 )
2139 completions = set()
2139 completions = set()
2140 if not args:
2140 if not args:
2141 args = [b'']
2141 args = [b'']
2142 for a in args:
2142 for a in args:
2143 completions.update(n for n in names if n.startswith(a))
2143 completions.update(n for n in names if n.startswith(a))
2144 ui.write(b'\n'.join(sorted(completions)))
2144 ui.write(b'\n'.join(sorted(completions)))
2145 ui.write(b'\n')
2145 ui.write(b'\n')
2146
2146
2147
2147
2148 @command(
2148 @command(
2149 b'debugnodemap',
2149 b'debugnodemap',
2150 [
2150 [
2151 (
2151 (
2152 b'',
2152 b'',
2153 b'dump-new',
2153 b'dump-new',
2154 False,
2154 False,
2155 _(b'write a (new) persistent binary nodemap on stdin'),
2155 _(b'write a (new) persistent binary nodemap on stdin'),
2156 ),
2156 ),
2157 (b'', b'dump-disk', False, _(b'dump on-disk data on stdin')),
2157 (b'', b'dump-disk', False, _(b'dump on-disk data on stdin')),
2158 (
2158 (
2159 b'',
2159 b'',
2160 b'check',
2160 b'check',
2161 False,
2161 False,
2162 _(b'check that the data on disk data are correct.'),
2162 _(b'check that the data on disk data are correct.'),
2163 ),
2163 ),
2164 (
2164 (
2165 b'',
2165 b'',
2166 b'metadata',
2166 b'metadata',
2167 False,
2167 False,
2168 _(b'display the on disk meta data for the nodemap'),
2168 _(b'display the on disk meta data for the nodemap'),
2169 ),
2169 ),
2170 ],
2170 ],
2171 )
2171 )
2172 def debugnodemap(ui, repo, **opts):
2172 def debugnodemap(ui, repo, **opts):
2173 """write and inspect on disk nodemap
2173 """write and inspect on disk nodemap
2174 """
2174 """
2175 if opts['dump_new']:
2175 if opts['dump_new']:
2176 unfi = repo.unfiltered()
2176 unfi = repo.unfiltered()
2177 cl = unfi.changelog
2177 cl = unfi.changelog
2178 if util.safehasattr(cl.index, "nodemap_data_all"):
2178 if util.safehasattr(cl.index, "nodemap_data_all"):
2179 data = cl.index.nodemap_data_all()
2179 data = cl.index.nodemap_data_all()
2180 else:
2180 else:
2181 data = nodemap.persistent_data(cl.index)
2181 data = nodemap.persistent_data(cl.index)
2182 ui.write(data)
2182 ui.write(data)
2183 elif opts['dump_disk']:
2183 elif opts['dump_disk']:
2184 unfi = repo.unfiltered()
2184 unfi = repo.unfiltered()
2185 cl = unfi.changelog
2185 cl = unfi.changelog
2186 nm_data = nodemap.persisted_data(cl)
2186 nm_data = nodemap.persisted_data(cl)
2187 if nm_data is not None:
2187 if nm_data is not None:
2188 docket, data = nm_data
2188 docket, data = nm_data
2189 ui.write(data[:])
2189 ui.write(data[:])
2190 elif opts['check']:
2190 elif opts['check']:
2191 unfi = repo.unfiltered()
2191 unfi = repo.unfiltered()
2192 cl = unfi.changelog
2192 cl = unfi.changelog
2193 nm_data = nodemap.persisted_data(cl)
2193 nm_data = nodemap.persisted_data(cl)
2194 if nm_data is not None:
2194 if nm_data is not None:
2195 docket, data = nm_data
2195 docket, data = nm_data
2196 return nodemap.check_data(ui, cl.index, data)
2196 return nodemap.check_data(ui, cl.index, data)
2197 elif opts['metadata']:
2197 elif opts['metadata']:
2198 unfi = repo.unfiltered()
2198 unfi = repo.unfiltered()
2199 cl = unfi.changelog
2199 cl = unfi.changelog
2200 nm_data = nodemap.persisted_data(cl)
2200 nm_data = nodemap.persisted_data(cl)
2201 if nm_data is not None:
2201 if nm_data is not None:
2202 docket, data = nm_data
2202 docket, data = nm_data
2203 ui.write((b"uid: %s\n") % docket.uid)
2203 ui.write((b"uid: %s\n") % docket.uid)
2204 ui.write((b"tip-rev: %d\n") % docket.tip_rev)
2204 ui.write((b"tip-rev: %d\n") % docket.tip_rev)
2205 ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
2205 ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
2206 ui.write((b"data-length: %d\n") % docket.data_length)
2206 ui.write((b"data-length: %d\n") % docket.data_length)
2207 ui.write((b"data-unused: %d\n") % docket.data_unused)
2207 ui.write((b"data-unused: %d\n") % docket.data_unused)
2208 unused_perc = docket.data_unused * 100.0 / docket.data_length
2208 unused_perc = docket.data_unused * 100.0 / docket.data_length
2209 ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2209 ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
2210
2210
2211
2211
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            # Wrapped in _() so the message is translatable, consistent
            # with the other user-facing Abort messages in this module.
            raise error.Abort(
                _(
                    b'changeset references must be full hexadecimal '
                    b'node identifiers'
                )
            )

    if opts.get(b'delete'):
        # Deletion mode: --delete takes marker indices (as printed by
        # --index) and removes those markers from the obsstore.
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # Creation mode: record a marker obsoleting `precursor` with the
        # given successors.
        if opts[b'rev']:
            raise error.Abort(
                _(b'cannot select revision when creating marker')
            )
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    if prec not in repo.unfiltered():
                        # NOTE(review): "cannot used" looks like a typo for
                        # "cannot use"; wording kept as-is because tests may
                        # match the exact message.
                        raise error.Abort(
                            _(
                                b'cannot used --record-parents on '
                                b'unknown changesets'
                            )
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # Display mode: list markers, optionally filtered by --rev.
        if opts[b'rev']:
            revs = scmutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
2359
2359
2360
2360
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    revctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    # One "source -> destination" line per recorded copy against p1.
    for dest, source in revctx.p1copies().items():
        ui.write(b'%s -> %s\n' % (source, dest))
2373
2373
2374
2374
@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""
    # Renamed from `debugp1copies`: the previous def name collided with the
    # p1 variant defined just above in this module and shadowed it at module
    # level. The registered command name (b'debugp2copies') is unchanged.
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    # One "source -> destination" line per recorded copy against p2.
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))
2387
2387
2388
2388
@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # Reject anything pointing outside the repository.
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        # dirstate paths always use '/'; normalize the spec to match.
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in pycompat.iteritems(dirstate):
            # Skip entries outside the spec or in a state we don't want.
            if not f.startswith(spec) or st[0] not in acceptable:
                continue
            if fixpaths:
                f = f.replace(b'/', pycompat.ossep)
            if fullpaths:
                addfile(f)
                continue
            sep = f.find(pycompat.ossep, speclen)
            if sep < 0:
                addfile(f)
            else:
                adddir(f[:sep])
        return files, dirs

    # Translate the state filter options into dirstate status characters.
    states = b''
    if opts['normal']:
        states += b'nm'
    if opts['added']:
        states += b'a'
    if opts['removed']:
        states += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, states or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')
2457
2457
2458
2458
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    src_ctx = scmutil.revsingle(repo, rev1)
    dst_ctx = scmutil.revsingle(repo, rev2)
    matcher = scmutil.match(src_ctx, pats, opts)
    copy_map = copies.pathcopies(src_ctx, dst_ctx, matcher)
    # Sorted by destination path for stable output.
    for dst, src in sorted(copy_map.items()):
        ui.write(b'%s -> %s\n' % (src, dst))
2472
2472
2473
2473
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {(b'devel', b'debug.peer-request'): True}

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

    islocal = peer.local() is not None
    pushable = peer.canpush()

    yes, no = _(b'yes'), _(b'no')
    ui.write(_(b'url: %s\n') % peer.url())
    ui.write(_(b'local: %s\n') % (yes if islocal else no))
    ui.write(_(b'pushable: %s\n') % (yes if pushable else no))
2492
2492
2493
2493
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    forced = opts[b'tool']
    overrides = {}
    if forced:
        overrides[(b'ui', b'forcemerge')] = forced
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(forced)))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        env_merge = encoding.environ.get(b"HGMERGE")
        if env_merge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(env_merge)))
        cfg_merge = ui.config(b"ui", b"merge")
        if cfg_merge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(cfg_merge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        matcher = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(matcher):
            fctx = ctx[path]
            try:
                # Without --debug, swallow the tool-selection chatter that
                # _picktool may emit while probing configurations.
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(b'%s = %s\n' % (path, tool))
2581
2581
2582
2582
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if not keyinfo:
        # Listing mode: dump every key/value pair in the namespace.
        listing = target.listkeys(namespace)
        for name, value in sorted(pycompat.iteritems(listing)):
            ui.write(
                b"%s\t%s\n"
                % (stringutil.escapestr(name), stringutil.escapestr(value))
            )
        return

    # Update mode: atomically move the key from `old` to `new`.
    key, old, new = keyinfo
    with target.commandexecutor() as executor:
        outcome = executor.callcommand(
            b'pushkey',
            {
                b'namespace': namespace,
                b'key': key,
                b'old': old,
                b'new': new,
            },
        ).result()

    ui.status(pycompat.bytestr(outcome) + b'\n')
    # Shell convention: 0 on success, nonzero on failure.
    return not outcome
2614
2614
2615
2615
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    """compare two parent vectors (debug aid for the pvec module)"""
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    else:
        # Defensive fallback: previously `rel` was left unbound when none
        # of the comparisons above matched, which would crash the final
        # ui.write with an UnboundLocalError. Presumably `pa | pb` covers
        # every remaining case — TODO confirm against pvec semantics.
        rel = b"?"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )
2642
2642
2643
2643
@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            in_manifest = set(ctx.manifest().keys())
            in_dirstate = set(dirstate)
            manifest_only = in_manifest - in_dirstate
            dirstate_only = in_dirstate - in_manifest
            # Dirstate-only entries marked 'a' (added) are deliberately
            # left alone.
            not_added = {f for f in dirstate_only if dirstate[f] != b'a'}
            changedfiles = manifest_only | not_added

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2691
2691
2692
2692
@command(b'debugrebuildfncache', [], b'')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # All of the heavy lifting lives in the repair module.
    repair.rebuildfncache(ui, repo)
2697
2697
2698
2698
@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    matcher = scmutil.match(ctx, pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # renamed() yields (source path, source filenode) or falsy.
        renamed = fctx.filelog().renamed(fctx.filenode())
        relpath = repo.pathto(path)
        if renamed:
            ui.write(
                _(b"%s renamed from %s:%s\n")
                % (relpath, renamed[0], hex(renamed[1]))
            )
        else:
            ui.write(_(b"%s not renamed\n") % relpath)
2718
2718
2719
2719
@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """print the current repo requirements"""
    for req in sorted(repo.requirements):
        ui.write(b"%s\n" % req)
2725
2725
2726
2726
@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog

    Without --dump, prints aggregate statistics: flags, revision/delta
    breakdown, chunk compression types, and chain-length information.
    With --dump, prints one raw index row per revision instead.
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)

    if opts.get(b"dump"):
        # Raw per-revision dump mode: one line per rev, then exit early.
        numrevs = len(r)
        ui.write(
            (
                b"# rev p1rev p2rev start end deltastart base p1 p2"
                b" rawsize totalsize compression heads chainlen\n"
            )
        )
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # -1 means the revision is stored as a full snapshot;
                # treat the revision as its own delta base.
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # Maintain the set of current head revisions incrementally:
            # a rev replaces its parents as a head.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write(
                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                b"%11d %5d %8d\n"
                % (
                    rev,
                    p1,
                    p2,
                    r.start(rev),
                    r.end(rev),
                    r.start(dbase),
                    r.start(cbase),
                    r.start(p1),
                    r.start(p2),
                    rs,
                    ts,
                    compression,
                    len(heads),
                    clen,
                )
            )
        return 0

    # Decode the revlog version/flags word.
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append(b'inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append(b'generaldelta')
    if not flags:
        flags = [b'(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each triple is [min, max, total].
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # Fold `size` into a [min, max, total] accumulator in place.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # No delta parent: stored as a full snapshot (or empty text).
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # Delta against another revision: extend that rev's chain.
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, b'_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # First byte of the stored chunk identifies its compression.
            chunktype = bytes(segment[0:1])
        else:
            chunktype = b'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # Turn the running totals into averages (index 2 of each triple).
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = b'%%%dd\n'
    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # Decimal format string padded to the width of `max`.
        return basedfmtstr % len(str(max))

    def pcfmtstr(max, padding=0):
        # "value (percent%)" format string padded to the width of `max`.
        return basepcfmtstr % (len(str(max)), b' ' * padding)

    def pcfmt(value, total):
        # Return (value, percentage-of-total) for the format above.
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.writenoi18n(b'format : %d\n' % format)
    ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))

    ui.write(b'\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
    ui.writenoi18n(
        b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
    )
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
    ui.writenoi18n(
        b' text : '
        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' delta : '
        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(numsnapdepth[depth], numrevs)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(snaptotal[depth], totalsize)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # Render a chunk-type byte as a printable label.
        if chunktype == b'empty':
            return b' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return b' 0x%s : ' % hex(chunktype)

    ui.write(b'\n')
    ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write(b'\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
    ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
    ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)

    if format > 0:
        ui.write(b'\n')
        ui.writenoi18n(
            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
            % tuple(datasize)
        )
        ui.writenoi18n(
            b'full revision size (min/max/avg) : %d / %d / %d\n'
            % tuple(fullsize)
        )
        ui.writenoi18n(
            b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
            % tuple(semisize)
        )
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.writenoi18n(
                b' level-%-3d (min/max/avg) : %d / %d / %d\n'
                % ((depth,) + tuple(snapsizedepth[depth]))
            )
        ui.writenoi18n(
            b'delta size (min/max/avg) : %d / %d / %d\n'
            % tuple(deltasize)
        )

    if numdeltas > 0:
        ui.write(b'\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.writenoi18n(
            b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
        )
        if numprev > 0:
            ui.writenoi18n(
                b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
            )
            ui.writenoi18n(
                b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
            )
            ui.writenoi18n(
                b' other : ' + fmt2 % pcfmt(numoprev, numprev)
            )
        if gdelta:
            ui.writenoi18n(
                b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
            )
3081
3081
3082
3082
@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index

    Two output layouts are supported via -f/--format (0 or 1); each has a
    terse and a verbose (-v) variant. Any other format value aborts.
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    # Full hashes in debug mode, short hashes otherwise.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b" rev offset length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b" rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b" rev flag offset length size link p1"
                    b" p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b" rev flag size link p1 p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # Best-effort: fall back to null parents if lookup fails.
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
3196
3196
3197
3197
@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems(b'revsetalias')
    # Parse pipeline: each stage transforms the tree produced by the
    # previous one. Order matters.
    stages = [
        (b'parsed', lambda tree: tree),
        (
            b'expanded',
            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
        ),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]
    if opts[b'no_optimized']:
        stages = stages[:-1]
    if opts[b'verify_optimized'] and opts[b'no_optimized']:
        raise error.Abort(
            _(b'cannot use --verify-optimized with --no-optimized')
        )
    stagenames = {n for n, f in stages}

    showalways = set()
    showchanged = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
        showchanged.update([b'expanded', b'concatenated'])
    if opts[b'optimize']:
        showalways.add(b'optimized')
    if opts[b'show_stage'] and opts[b'optimize']:
        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(revsetlang.prettyformat(tree), b"\n")
            printedtree = tree

    if opts[b'verify_optimized']:
        # Evaluate both the analyzed and optimized trees and diff the
        # resulting revision sequences; return 1 on any difference.
        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
            ui.writenoi18n(
                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
            )
            ui.writenoi18n(
                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
            )
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(b' %d\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
    if not opts[b'show_revs']:
        return
    for c in revs:
        ui.write(b"%d\n" % c)
3329
3329
3330
3330
@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    # Only the SSH stdio transport is implemented by this debug command.
    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    # File handle that server I/O is mirrored to, or None for no logging.
    logfh = None

    # The two logging destinations are mutually exclusive.
    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    if opts[b'logiofd']:
        # Ideally we would be line buffered. But line buffering in binary
        # mode isn't supported and emits a warning in Python 3.8+. Disabling
        # buffering could have performance impacts. But since this isn't
        # performance critical code, it should be fine.
        try:
            logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
        except OSError as e:
            # Only the "can't seek" failure gets a fallback; anything else
            # is a real error and is re-raised.
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 0)

    # Hand control to the SSH wire protocol server; this blocks until the
    # client disconnects.
    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
3379
3379
3380
3380
@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """

    # Resolve both revisions up front; a missing second revision defaults
    # to the null changeset.
    first_parent = scmutil.revsingle(repo, rev1).node()
    second_parent = scmutil.revsingle(repo, rev2, b'null').node()

    # Writing parents requires the working-directory lock.
    with repo.wlock():
        repo.setparents(first_parent, second_parent)
3398
3398
3399
3399
@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    opts = pycompat.byteskwargs(opts)
    # With -c/-m/--dir there is no FILE argument: the first positional
    # argument is actually the revision, so shift the arguments over.
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        # Plain FILE form requires an explicit revision.
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    # Unwrap to the underlying revlog when the storage object wraps one.
    r = getattr(r, '_revlog', r)
    try:
        sidedata = r.sidedata(r.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        # Sort entries by key for stable, readable output.
        sidedata = list(sidedata.items())
        sidedata.sort()
        ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
        for key, value in sidedata:
            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
            if ui.verbose:
                # Full payload dump only in verbose mode.
                ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3426
3426
3427
3427
@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    # The chain-building machinery below relies on Windows-only APIs.
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        # Fall back to the repository's 'default' path; without a repo
        # there is nothing to connect to.
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        source = b"default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    # Only schemes with a well-known default port are supported.
    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    from . import win32

    # NOTE(review): ssl.wrap_socket is deprecated since Python 3.7 —
    # confirm against the supported Python versions. Verification is
    # disabled (CERT_NONE) because we only need the raw peer certificate
    # bytes, not a trusted channel.
    s = ssl.wrap_socket(
        socket.socket(),
        ssl_version=ssl.PROTOCOL_TLS,
        cert_reqs=ssl.CERT_NONE,
        ca_certs=None,
    )

    try:
        s.connect(addr)
        # DER-encoded peer certificate (binary form).
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        # First pass: check only, without triggering Windows Update.
        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            # Second pass (build=True by default) attempts to fetch the
            # missing intermediates/root.
            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()
3497
3497
3498
3498
@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    # Collect backup bundle files, newest first.
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    # Neutralize bundle/force so getremotechanges below behaves like a
    # plain incoming against each backup file.
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        # Render up to `limit` changesets from one bundle, honoring the
        # log options --newest-first and --no-merges.
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [True for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        # Nothing to do if the changeset is already present.
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = ui.expandpath(os.path.relpath(backup, encoding.getcwd()))
        source, branches = hg.parseurl(source, opts.get(b"branch"))
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            # An unreadable bundle (e.g. missing parent) is reported but
            # does not abort the scan of the remaining backups.
            msg = _(b"\nwarning: unable to open bundle %s") % source
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        # Silence the incoming machinery; restore verbosity afterwards.
        quiet = ui.quiet
        try:
            ui.quiet = True
            other, chlist, cleanupfn = bundlerepo.getremotechanges(
                ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
            )
        except error.LookupError:
            continue
        finally:
            ui.quiet = quiet

        try:
            if not chlist:
                continue
            if recovernode:
                # Recovery mode: apply the first bundle containing the
                # requested changeset inside a transaction, then stop.
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, source)
                        gen = exchange.readbundle(ui, f, source)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + source,
                            )
                        else:
                            # Legacy bundle format.
                            gen.apply(repo, b"unbundle", b"bundle:" + source)
                        break
            else:
                # Listing mode: print a header with the bundle's mtime ...
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(source)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
                else:
                    # ... then a one-line summary per changeset.
                    opts[
                        b"template"
                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                    displayer = logcmdutil.changesetdisplayer(
                        ui, other, opts, False
                    )
                    display(other, chlist, displayer)
                    displayer.close()
        finally:
            # Always release temporary bundle repository resources.
            cleanupfn()
3634
3634
3635
3635
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    """print the substate (path, source, revision) of each subrepository"""
    ctx = scmutil.revsingle(repo, rev, None)
    for path, substate in sorted(ctx.substate.items()):
        source = substate[0]
        revision = substate[1]
        ui.writenoi18n(b'path %s\n' % path)
        ui.writenoi18n(b' source %s\n' % source)
        ui.writenoi18n(b' revision %s\n' % revision)
3647
3647
3648
3648
@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % ctx2str(ctx))
        successors = obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        )
        for succsset in successors:
            # Non-empty sets are rendered indented, nodes space-separated;
            # an empty (pruned) set still yields a bare newline.
            if succsset:
                rendered = b' '.join(node2str(node) for node in succsset)
                ui.write(b' ')
                ui.write(rendered)
            ui.write(b'\n')
3703
3703
3704
3704
@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    fnodescache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    for rev in repo:
        node = repo[rev].node()
        # computemissing=False: report what the cache holds, never compute.
        fnode = fnodescache.getfnode(node, computemissing=False)
        if fnode:
            display = hex(fnode)
        else:
            display = b'missing/invalid'
        ui.write(b'%d %s %s\n' % (rev, hex(node), display))
3714
3714
3715
3715
@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        # -r requires a repository even though the command itself does not.
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = scmutil.revrange(repo, opts['rev'])

    # Extra template keywords supplied on the command line via -D KEY=VALUE.
    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            # Empty keys and 'ui' (a reserved resource name) are rejected.
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        # Show the raw parse tree, and the tree again after template alias
        # expansion if expansion changed anything.
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        # Generic template: render once with the -D properties only.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested changeset.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
3779
3779
3780
3780
@command(
    b'debuguigetpass',
    [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    response = ui.getpass(prompt)
    ui.writenoi18n(b'response: %s\n' % response)
3791
3791
3792
3792
@command(
    b'debuguiprompt',
    [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    response = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % response)
3803
3803
3804
3804
@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Both locks are needed before rewriting cache files.
    with repo.wlock():
        with repo.lock():
            repo.updatecaches(full=True)
3810
3810
3811
3811
3812 @command(
3812 @command(
3813 b'debugupgraderepo',
3813 b'debugupgraderepo',
3814 [
3814 [
3815 (
3815 (
3816 b'o',
3816 b'o',
3817 b'optimize',
3817 b'optimize',
3818 [],
3818 [],
3819 _(b'extra optimization to perform'),
3819 _(b'extra optimization to perform'),
3820 _(b'NAME'),
3820 _(b'NAME'),
3821 ),
3821 ),
3822 (b'', b'run', False, _(b'performs an upgrade')),
3822 (b'', b'run', False, _(b'performs an upgrade')),
3823 (b'', b'backup', True, _(b'keep the old repository content around')),
3823 (b'', b'backup', True, _(b'keep the old repository content around')),
3824 (b'', b'changelog', None, _(b'select the changelog for upgrade')),
3824 (b'', b'changelog', None, _(b'select the changelog for upgrade')),
3825 (b'', b'manifest', None, _(b'select the manifest for upgrade')),
3825 (b'', b'manifest', None, _(b'select the manifest for upgrade')),
3826 ],
3826 ],
3827 )
3827 )
3828 def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
3828 def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
3829 """upgrade a repository to use different features
3829 """upgrade a repository to use different features
3830
3830
3831 If no arguments are specified, the repository is evaluated for upgrade
3831 If no arguments are specified, the repository is evaluated for upgrade
3832 and a list of problems and potential optimizations is printed.
3832 and a list of problems and potential optimizations is printed.
3833
3833
3834 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
3834 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
3835 can be influenced via additional arguments. More details will be provided
3835 can be influenced via additional arguments. More details will be provided
3836 by the command output when run without ``--run``.
3836 by the command output when run without ``--run``.
3837
3837
3838 During the upgrade, the repository will be locked and no writes will be
3838 During the upgrade, the repository will be locked and no writes will be
3839 allowed.
3839 allowed.
3840
3840
3841 At the end of the upgrade, the repository may not be readable while new
3841 At the end of the upgrade, the repository may not be readable while new
3842 repository data is swapped in. This window will be as long as it takes to
3842 repository data is swapped in. This window will be as long as it takes to
3843 rename some directories inside the ``.hg`` directory. On most machines, this
3843 rename some directories inside the ``.hg`` directory. On most machines, this
3844 should complete almost instantaneously and the chances of a consumer being
3844 should complete almost instantaneously and the chances of a consumer being
3845 unable to access the repository should be low.
3845 unable to access the repository should be low.
3846
3846
3847 By default, all revlog will be upgraded. You can restrict this using flag
3847 By default, all revlog will be upgraded. You can restrict this using flag
3848 such as `--manifest`:
3848 such as `--manifest`:
3849
3849
3850 * `--manifest`: only optimize the manifest
3850 * `--manifest`: only optimize the manifest
3851 * `--no-manifest`: optimize all revlog but the manifest
3851 * `--no-manifest`: optimize all revlog but the manifest
3852 * `--changelog`: optimize the changelog only
3852 * `--changelog`: optimize the changelog only
3853 * `--no-changelog --no-manifest`: optimize filelogs only
3853 * `--no-changelog --no-manifest`: optimize filelogs only
3854 """
3854 """
3855 return upgrade.upgraderepo(
3855 return upgrade.upgraderepo(
3856 ui, repo, run=run, optimize=optimize, backup=backup, **opts
3856 ui, repo, run=run, optimize=optimize, backup=backup, **opts
3857 )
3857 )
3858
3858
3859
3859
3860 @command(
3860 @command(
3861 b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
3861 b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
3862 )
3862 )
3863 def debugwalk(ui, repo, *pats, **opts):
3863 def debugwalk(ui, repo, *pats, **opts):
3864 """show how files match on given patterns"""
3864 """show how files match on given patterns"""
3865 opts = pycompat.byteskwargs(opts)
3865 opts = pycompat.byteskwargs(opts)
3866 m = scmutil.match(repo[None], pats, opts)
3866 m = scmutil.match(repo[None], pats, opts)
3867 if ui.verbose:
3867 if ui.verbose:
3868 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
3868 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
3869 items = list(repo[None].walk(m))
3869 items = list(repo[None].walk(m))
3870 if not items:
3870 if not items:
3871 return
3871 return
3872 f = lambda fn: fn
3872 f = lambda fn: fn
3873 if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
3873 if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
3874 f = lambda fn: util.normpath(fn)
3874 f = lambda fn: util.normpath(fn)
3875 fmt = b'f %%-%ds %%-%ds %%s' % (
3875 fmt = b'f %%-%ds %%-%ds %%s' % (
3876 max([len(abs) for abs in items]),
3876 max([len(abs) for abs in items]),
3877 max([len(repo.pathto(abs)) for abs in items]),
3877 max([len(repo.pathto(abs)) for abs in items]),
3878 )
3878 )
3879 for abs in items:
3879 for abs in items:
3880 line = fmt % (
3880 line = fmt % (
3881 abs,
3881 abs,
3882 f(repo.pathto(abs)),
3882 f(repo.pathto(abs)),
3883 m.exact(abs) and b'exact' or b'',
3883 m.exact(abs) and b'exact' or b'',
3884 )
3884 )
3885 ui.write(b"%s\n" % line.rstrip())
3885 ui.write(b"%s\n" % line.rstrip())
3886
3886
3887
3887
3888 @command(b'debugwhyunstable', [], _(b'REV'))
3888 @command(b'debugwhyunstable', [], _(b'REV'))
3889 def debugwhyunstable(ui, repo, rev):
3889 def debugwhyunstable(ui, repo, rev):
3890 """explain instabilities of a changeset"""
3890 """explain instabilities of a changeset"""
3891 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
3891 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
3892 dnodes = b''
3892 dnodes = b''
3893 if entry.get(b'divergentnodes'):
3893 if entry.get(b'divergentnodes'):
3894 dnodes = (
3894 dnodes = (
3895 b' '.join(
3895 b' '.join(
3896 b'%s (%s)' % (ctx.hex(), ctx.phasestr())
3896 b'%s (%s)' % (ctx.hex(), ctx.phasestr())
3897 for ctx in entry[b'divergentnodes']
3897 for ctx in entry[b'divergentnodes']
3898 )
3898 )
3899 + b' '
3899 + b' '
3900 )
3900 )
3901 ui.write(
3901 ui.write(
3902 b'%s: %s%s %s\n'
3902 b'%s: %s%s %s\n'
3903 % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
3903 % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
3904 )
3904 )
3905
3905
3906
3906
3907 @command(
3907 @command(
3908 b'debugwireargs',
3908 b'debugwireargs',
3909 [
3909 [
3910 (b'', b'three', b'', b'three'),
3910 (b'', b'three', b'', b'three'),
3911 (b'', b'four', b'', b'four'),
3911 (b'', b'four', b'', b'four'),
3912 (b'', b'five', b'', b'five'),
3912 (b'', b'five', b'', b'five'),
3913 ]
3913 ]
3914 + cmdutil.remoteopts,
3914 + cmdutil.remoteopts,
3915 _(b'REPO [OPTIONS]... [ONE [TWO]]'),
3915 _(b'REPO [OPTIONS]... [ONE [TWO]]'),
3916 norepo=True,
3916 norepo=True,
3917 )
3917 )
3918 def debugwireargs(ui, repopath, *vals, **opts):
3918 def debugwireargs(ui, repopath, *vals, **opts):
3919 opts = pycompat.byteskwargs(opts)
3919 opts = pycompat.byteskwargs(opts)
3920 repo = hg.peer(ui, opts, repopath)
3920 repo = hg.peer(ui, opts, repopath)
3921 for opt in cmdutil.remoteopts:
3921 for opt in cmdutil.remoteopts:
3922 del opts[opt[1]]
3922 del opts[opt[1]]
3923 args = {}
3923 args = {}
3924 for k, v in pycompat.iteritems(opts):
3924 for k, v in pycompat.iteritems(opts):
3925 if v:
3925 if v:
3926 args[k] = v
3926 args[k] = v
3927 args = pycompat.strkwargs(args)
3927 args = pycompat.strkwargs(args)
3928 # run twice to check that we don't mess up the stream for the next command
3928 # run twice to check that we don't mess up the stream for the next command
3929 res1 = repo.debugwireargs(*vals, **args)
3929 res1 = repo.debugwireargs(*vals, **args)
3930 res2 = repo.debugwireargs(*vals, **args)
3930 res2 = repo.debugwireargs(*vals, **args)
3931 ui.write(b"%s\n" % res1)
3931 ui.write(b"%s\n" % res1)
3932 if res1 != res2:
3932 if res1 != res2:
3933 ui.warn(b"%s\n" % res2)
3933 ui.warn(b"%s\n" % res2)
3934
3934
3935
3935
3936 def _parsewirelangblocks(fh):
3936 def _parsewirelangblocks(fh):
3937 activeaction = None
3937 activeaction = None
3938 blocklines = []
3938 blocklines = []
3939 lastindent = 0
3939 lastindent = 0
3940
3940
3941 for line in fh:
3941 for line in fh:
3942 line = line.rstrip()
3942 line = line.rstrip()
3943 if not line:
3943 if not line:
3944 continue
3944 continue
3945
3945
3946 if line.startswith(b'#'):
3946 if line.startswith(b'#'):
3947 continue
3947 continue
3948
3948
3949 if not line.startswith(b' '):
3949 if not line.startswith(b' '):
3950 # New block. Flush previous one.
3950 # New block. Flush previous one.
3951 if activeaction:
3951 if activeaction:
3952 yield activeaction, blocklines
3952 yield activeaction, blocklines
3953
3953
3954 activeaction = line
3954 activeaction = line
3955 blocklines = []
3955 blocklines = []
3956 lastindent = 0
3956 lastindent = 0
3957 continue
3957 continue
3958
3958
3959 # Else we start with an indent.
3959 # Else we start with an indent.
3960
3960
3961 if not activeaction:
3961 if not activeaction:
3962 raise error.Abort(_(b'indented line outside of block'))
3962 raise error.Abort(_(b'indented line outside of block'))
3963
3963
3964 indent = len(line) - len(line.lstrip())
3964 indent = len(line) - len(line.lstrip())
3965
3965
3966 # If this line is indented more than the last line, concatenate it.
3966 # If this line is indented more than the last line, concatenate it.
3967 if indent > lastindent and blocklines:
3967 if indent > lastindent and blocklines:
3968 blocklines[-1] += line.lstrip()
3968 blocklines[-1] += line.lstrip()
3969 else:
3969 else:
3970 blocklines.append(line)
3970 blocklines.append(line)
3971 lastindent = indent
3971 lastindent = indent
3972
3972
3973 # Flush last block.
3973 # Flush last block.
3974 if activeaction:
3974 if activeaction:
3975 yield activeaction, blocklines
3975 yield activeaction, blocklines
3976
3976
3977
3977
3978 @command(
3978 @command(
3979 b'debugwireproto',
3979 b'debugwireproto',
3980 [
3980 [
3981 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
3981 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
3982 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
3982 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
3983 (
3983 (
3984 b'',
3984 b'',
3985 b'noreadstderr',
3985 b'noreadstderr',
3986 False,
3986 False,
3987 _(b'do not read from stderr of the remote'),
3987 _(b'do not read from stderr of the remote'),
3988 ),
3988 ),
3989 (
3989 (
3990 b'',
3990 b'',
3991 b'nologhandshake',
3991 b'nologhandshake',
3992 False,
3992 False,
3993 _(b'do not log I/O related to the peer handshake'),
3993 _(b'do not log I/O related to the peer handshake'),
3994 ),
3994 ),
3995 ]
3995 ]
3996 + cmdutil.remoteopts,
3996 + cmdutil.remoteopts,
3997 _(b'[PATH]'),
3997 _(b'[PATH]'),
3998 optionalrepo=True,
3998 optionalrepo=True,
3999 )
3999 )
4000 def debugwireproto(ui, repo, path=None, **opts):
4000 def debugwireproto(ui, repo, path=None, **opts):
4001 """send wire protocol commands to a server
4001 """send wire protocol commands to a server
4002
4002
4003 This command can be used to issue wire protocol commands to remote
4003 This command can be used to issue wire protocol commands to remote
4004 peers and to debug the raw data being exchanged.
4004 peers and to debug the raw data being exchanged.
4005
4005
4006 ``--localssh`` will start an SSH server against the current repository
4006 ``--localssh`` will start an SSH server against the current repository
4007 and connect to that. By default, the connection will perform a handshake
4007 and connect to that. By default, the connection will perform a handshake
4008 and establish an appropriate peer instance.
4008 and establish an appropriate peer instance.
4009
4009
4010 ``--peer`` can be used to bypass the handshake protocol and construct a
4010 ``--peer`` can be used to bypass the handshake protocol and construct a
4011 peer instance using the specified class type. Valid values are ``raw``,
4011 peer instance using the specified class type. Valid values are ``raw``,
4012 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
4012 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
4013 raw data payloads and don't support higher-level command actions.
4013 raw data payloads and don't support higher-level command actions.
4014
4014
4015 ``--noreadstderr`` can be used to disable automatic reading from stderr
4015 ``--noreadstderr`` can be used to disable automatic reading from stderr
4016 of the peer (for SSH connections only). Disabling automatic reading of
4016 of the peer (for SSH connections only). Disabling automatic reading of
4017 stderr is useful for making output more deterministic.
4017 stderr is useful for making output more deterministic.
4018
4018
4019 Commands are issued via a mini language which is specified via stdin.
4019 Commands are issued via a mini language which is specified via stdin.
4020 The language consists of individual actions to perform. An action is
4020 The language consists of individual actions to perform. An action is
4021 defined by a block. A block is defined as a line with no leading
4021 defined by a block. A block is defined as a line with no leading
4022 space followed by 0 or more lines with leading space. Blocks are
4022 space followed by 0 or more lines with leading space. Blocks are
4023 effectively a high-level command with additional metadata.
4023 effectively a high-level command with additional metadata.
4024
4024
4025 Lines beginning with ``#`` are ignored.
4025 Lines beginning with ``#`` are ignored.
4026
4026
4027 The following sections denote available actions.
4027 The following sections denote available actions.
4028
4028
4029 raw
4029 raw
4030 ---
4030 ---
4031
4031
4032 Send raw data to the server.
4032 Send raw data to the server.
4033
4033
4034 The block payload contains the raw data to send as one atomic send
4034 The block payload contains the raw data to send as one atomic send
4035 operation. The data may not actually be delivered in a single system
4035 operation. The data may not actually be delivered in a single system
4036 call: it depends on the abilities of the transport being used.
4036 call: it depends on the abilities of the transport being used.
4037
4037
4038 Each line in the block is de-indented and concatenated. Then, that
4038 Each line in the block is de-indented and concatenated. Then, that
4039 value is evaluated as a Python b'' literal. This allows the use of
4039 value is evaluated as a Python b'' literal. This allows the use of
4040 backslash escaping, etc.
4040 backslash escaping, etc.
4041
4041
4042 raw+
4042 raw+
4043 ----
4043 ----
4044
4044
4045 Behaves like ``raw`` except flushes output afterwards.
4045 Behaves like ``raw`` except flushes output afterwards.
4046
4046
4047 command <X>
4047 command <X>
4048 -----------
4048 -----------
4049
4049
4050 Send a request to run a named command, whose name follows the ``command``
4050 Send a request to run a named command, whose name follows the ``command``
4051 string.
4051 string.
4052
4052
4053 Arguments to the command are defined as lines in this block. The format of
4053 Arguments to the command are defined as lines in this block. The format of
4054 each line is ``<key> <value>``. e.g.::
4054 each line is ``<key> <value>``. e.g.::
4055
4055
4056 command listkeys
4056 command listkeys
4057 namespace bookmarks
4057 namespace bookmarks
4058
4058
4059 If the value begins with ``eval:``, it will be interpreted as a Python
4059 If the value begins with ``eval:``, it will be interpreted as a Python
4060 literal expression. Otherwise values are interpreted as Python b'' literals.
4060 literal expression. Otherwise values are interpreted as Python b'' literals.
4061 This allows sending complex types and encoding special byte sequences via
4061 This allows sending complex types and encoding special byte sequences via
4062 backslash escaping.
4062 backslash escaping.
4063
4063
4064 The following arguments have special meaning:
4064 The following arguments have special meaning:
4065
4065
4066 ``PUSHFILE``
4066 ``PUSHFILE``
4067 When defined, the *push* mechanism of the peer will be used instead
4067 When defined, the *push* mechanism of the peer will be used instead
4068 of the static request-response mechanism and the content of the
4068 of the static request-response mechanism and the content of the
4069 file specified in the value of this argument will be sent as the
4069 file specified in the value of this argument will be sent as the
4070 command payload.
4070 command payload.
4071
4071
4072 This can be used to submit a local bundle file to the remote.
4072 This can be used to submit a local bundle file to the remote.
4073
4073
4074 batchbegin
4074 batchbegin
4075 ----------
4075 ----------
4076
4076
4077 Instruct the peer to begin a batched send.
4077 Instruct the peer to begin a batched send.
4078
4078
4079 All ``command`` blocks are queued for execution until the next
4079 All ``command`` blocks are queued for execution until the next
4080 ``batchsubmit`` block.
4080 ``batchsubmit`` block.
4081
4081
4082 batchsubmit
4082 batchsubmit
4083 -----------
4083 -----------
4084
4084
4085 Submit previously queued ``command`` blocks as a batch request.
4085 Submit previously queued ``command`` blocks as a batch request.
4086
4086
4087 This action MUST be paired with a ``batchbegin`` action.
4087 This action MUST be paired with a ``batchbegin`` action.
4088
4088
4089 httprequest <method> <path>
4089 httprequest <method> <path>
4090 ---------------------------
4090 ---------------------------
4091
4091
4092 (HTTP peer only)
4092 (HTTP peer only)
4093
4093
4094 Send an HTTP request to the peer.
4094 Send an HTTP request to the peer.
4095
4095
4096 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4096 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4097
4097
4098 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4098 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4099 headers to add to the request. e.g. ``Accept: foo``.
4099 headers to add to the request. e.g. ``Accept: foo``.
4100
4100
4101 The following arguments are special:
4101 The following arguments are special:
4102
4102
4103 ``BODYFILE``
4103 ``BODYFILE``
4104 The content of the file defined as the value to this argument will be
4104 The content of the file defined as the value to this argument will be
4105 transferred verbatim as the HTTP request body.
4105 transferred verbatim as the HTTP request body.
4106
4106
4107 ``frame <type> <flags> <payload>``
4107 ``frame <type> <flags> <payload>``
4108 Send a unified protocol frame as part of the request body.
4108 Send a unified protocol frame as part of the request body.
4109
4109
4110 All frames will be collected and sent as the body to the HTTP
4110 All frames will be collected and sent as the body to the HTTP
4111 request.
4111 request.
4112
4112
4113 close
4113 close
4114 -----
4114 -----
4115
4115
4116 Close the connection to the server.
4116 Close the connection to the server.
4117
4117
4118 flush
4118 flush
4119 -----
4119 -----
4120
4120
4121 Flush data written to the server.
4121 Flush data written to the server.
4122
4122
4123 readavailable
4123 readavailable
4124 -------------
4124 -------------
4125
4125
4126 Close the write end of the connection and read all available data from
4126 Close the write end of the connection and read all available data from
4127 the server.
4127 the server.
4128
4128
4129 If the connection to the server encompasses multiple pipes, we poll both
4129 If the connection to the server encompasses multiple pipes, we poll both
4130 pipes and read available data.
4130 pipes and read available data.
4131
4131
4132 readline
4132 readline
4133 --------
4133 --------
4134
4134
4135 Read a line of output from the server. If there are multiple output
4135 Read a line of output from the server. If there are multiple output
4136 pipes, reads only the main pipe.
4136 pipes, reads only the main pipe.
4137
4137
4138 ereadline
4138 ereadline
4139 ---------
4139 ---------
4140
4140
4141 Like ``readline``, but read from the stderr pipe, if available.
4141 Like ``readline``, but read from the stderr pipe, if available.
4142
4142
4143 read <X>
4143 read <X>
4144 --------
4144 --------
4145
4145
4146 ``read()`` N bytes from the server's main output pipe.
4146 ``read()`` N bytes from the server's main output pipe.
4147
4147
4148 eread <X>
4148 eread <X>
4149 ---------
4149 ---------
4150
4150
4151 ``read()`` N bytes from the server's stderr pipe, if available.
4151 ``read()`` N bytes from the server's stderr pipe, if available.
4152
4152
4153 Specifying Unified Frame-Based Protocol Frames
4153 Specifying Unified Frame-Based Protocol Frames
4154 ----------------------------------------------
4154 ----------------------------------------------
4155
4155
4156 It is possible to emit a *Unified Frame-Based Protocol* by using special
4156 It is possible to emit a *Unified Frame-Based Protocol* by using special
4157 syntax.
4157 syntax.
4158
4158
4159 A frame is composed as a type, flags, and payload. These can be parsed
4159 A frame is composed as a type, flags, and payload. These can be parsed
4160 from a string of the form:
4160 from a string of the form:
4161
4161
4162 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
4162 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
4163
4163
4164 ``request-id`` and ``stream-id`` are integers defining the request and
4164 ``request-id`` and ``stream-id`` are integers defining the request and
4165 stream identifiers.
4165 stream identifiers.
4166
4166
4167 ``type`` can be an integer value for the frame type or the string name
4167 ``type`` can be an integer value for the frame type or the string name
4168 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
4168 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
4169 ``command-name``.
4169 ``command-name``.
4170
4170
4171 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
4171 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
4172 components. Each component (and there can be just one) can be an integer
4172 components. Each component (and there can be just one) can be an integer
4173 or a flag name for stream flags or frame flags, respectively. Values are
4173 or a flag name for stream flags or frame flags, respectively. Values are
4174 resolved to integers and then bitwise OR'd together.
4174 resolved to integers and then bitwise OR'd together.
4175
4175
4176 ``payload`` represents the raw frame payload. If it begins with
4176 ``payload`` represents the raw frame payload. If it begins with
4177 ``cbor:``, the following string is evaluated as Python code and the
4177 ``cbor:``, the following string is evaluated as Python code and the
4178 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
4178 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
4179 as a Python byte string literal.
4179 as a Python byte string literal.
4180 """
4180 """
4181 opts = pycompat.byteskwargs(opts)
4181 opts = pycompat.byteskwargs(opts)
4182
4182
4183 if opts[b'localssh'] and not repo:
4183 if opts[b'localssh'] and not repo:
4184 raise error.Abort(_(b'--localssh requires a repository'))
4184 raise error.Abort(_(b'--localssh requires a repository'))
4185
4185
4186 if opts[b'peer'] and opts[b'peer'] not in (
4186 if opts[b'peer'] and opts[b'peer'] not in (
4187 b'raw',
4187 b'raw',
4188 b'http2',
4188 b'http2',
4189 b'ssh1',
4189 b'ssh1',
4190 b'ssh2',
4190 b'ssh2',
4191 ):
4191 ):
4192 raise error.Abort(
4192 raise error.Abort(
4193 _(b'invalid value for --peer'),
4193 _(b'invalid value for --peer'),
4194 hint=_(b'valid values are "raw", "ssh1", and "ssh2"'),
4194 hint=_(b'valid values are "raw", "ssh1", and "ssh2"'),
4195 )
4195 )
4196
4196
4197 if path and opts[b'localssh']:
4197 if path and opts[b'localssh']:
4198 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4198 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4199
4199
4200 if ui.interactive():
4200 if ui.interactive():
4201 ui.write(_(b'(waiting for commands on stdin)\n'))
4201 ui.write(_(b'(waiting for commands on stdin)\n'))
4202
4202
4203 blocks = list(_parsewirelangblocks(ui.fin))
4203 blocks = list(_parsewirelangblocks(ui.fin))
4204
4204
4205 proc = None
4205 proc = None
4206 stdin = None
4206 stdin = None
4207 stdout = None
4207 stdout = None
4208 stderr = None
4208 stderr = None
4209 opener = None
4209 opener = None
4210
4210
4211 if opts[b'localssh']:
4211 if opts[b'localssh']:
4212 # We start the SSH server in its own process so there is process
4212 # We start the SSH server in its own process so there is process
4213 # separation. This prevents a whole class of potential bugs around
4213 # separation. This prevents a whole class of potential bugs around
4214 # shared state from interfering with server operation.
4214 # shared state from interfering with server operation.
4215 args = procutil.hgcmd() + [
4215 args = procutil.hgcmd() + [
4216 b'-R',
4216 b'-R',
4217 repo.root,
4217 repo.root,
4218 b'debugserve',
4218 b'debugserve',
4219 b'--sshstdio',
4219 b'--sshstdio',
4220 ]
4220 ]
4221 proc = subprocess.Popen(
4221 proc = subprocess.Popen(
4222 pycompat.rapply(procutil.tonativestr, args),
4222 pycompat.rapply(procutil.tonativestr, args),
4223 stdin=subprocess.PIPE,
4223 stdin=subprocess.PIPE,
4224 stdout=subprocess.PIPE,
4224 stdout=subprocess.PIPE,
4225 stderr=subprocess.PIPE,
4225 stderr=subprocess.PIPE,
4226 bufsize=0,
4226 bufsize=0,
4227 )
4227 )
4228
4228
4229 stdin = proc.stdin
4229 stdin = proc.stdin
4230 stdout = proc.stdout
4230 stdout = proc.stdout
4231 stderr = proc.stderr
4231 stderr = proc.stderr
4232
4232
4233 # We turn the pipes into observers so we can log I/O.
4233 # We turn the pipes into observers so we can log I/O.
4234 if ui.verbose or opts[b'peer'] == b'raw':
4234 if ui.verbose or opts[b'peer'] == b'raw':
4235 stdin = util.makeloggingfileobject(
4235 stdin = util.makeloggingfileobject(
4236 ui, proc.stdin, b'i', logdata=True
4236 ui, proc.stdin, b'i', logdata=True
4237 )
4237 )
4238 stdout = util.makeloggingfileobject(
4238 stdout = util.makeloggingfileobject(
4239 ui, proc.stdout, b'o', logdata=True
4239 ui, proc.stdout, b'o', logdata=True
4240 )
4240 )
4241 stderr = util.makeloggingfileobject(
4241 stderr = util.makeloggingfileobject(
4242 ui, proc.stderr, b'e', logdata=True
4242 ui, proc.stderr, b'e', logdata=True
4243 )
4243 )
4244
4244
4245 # --localssh also implies the peer connection settings.
4245 # --localssh also implies the peer connection settings.
4246
4246
4247 url = b'ssh://localserver'
4247 url = b'ssh://localserver'
4248 autoreadstderr = not opts[b'noreadstderr']
4248 autoreadstderr = not opts[b'noreadstderr']
4249
4249
4250 if opts[b'peer'] == b'ssh1':
4250 if opts[b'peer'] == b'ssh1':
4251 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4251 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4252 peer = sshpeer.sshv1peer(
4252 peer = sshpeer.sshv1peer(
4253 ui,
4253 ui,
4254 url,
4254 url,
4255 proc,
4255 proc,
4256 stdin,
4256 stdin,
4257 stdout,
4257 stdout,
4258 stderr,
4258 stderr,
4259 None,
4259 None,
4260 autoreadstderr=autoreadstderr,
4260 autoreadstderr=autoreadstderr,
4261 )
4261 )
4262 elif opts[b'peer'] == b'ssh2':
4262 elif opts[b'peer'] == b'ssh2':
4263 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4263 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4264 peer = sshpeer.sshv2peer(
4264 peer = sshpeer.sshv2peer(
4265 ui,
4265 ui,
4266 url,
4266 url,
4267 proc,
4267 proc,
4268 stdin,
4268 stdin,
4269 stdout,
4269 stdout,
4270 stderr,
4270 stderr,
4271 None,
4271 None,
4272 autoreadstderr=autoreadstderr,
4272 autoreadstderr=autoreadstderr,
4273 )
4273 )
4274 elif opts[b'peer'] == b'raw':
4274 elif opts[b'peer'] == b'raw':
4275 ui.write(_(b'using raw connection to peer\n'))
4275 ui.write(_(b'using raw connection to peer\n'))
4276 peer = None
4276 peer = None
4277 else:
4277 else:
4278 ui.write(_(b'creating ssh peer from handshake results\n'))
4278 ui.write(_(b'creating ssh peer from handshake results\n'))
4279 peer = sshpeer.makepeer(
4279 peer = sshpeer.makepeer(
4280 ui,
4280 ui,
4281 url,
4281 url,
4282 proc,
4282 proc,
4283 stdin,
4283 stdin,
4284 stdout,
4284 stdout,
4285 stderr,
4285 stderr,
4286 autoreadstderr=autoreadstderr,
4286 autoreadstderr=autoreadstderr,
4287 )
4287 )
4288
4288
4289 elif path:
4289 elif path:
4290 # We bypass hg.peer() so we can proxy the sockets.
4290 # We bypass hg.peer() so we can proxy the sockets.
4291 # TODO consider not doing this because we skip
4291 # TODO consider not doing this because we skip
4292 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4292 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4293 u = util.url(path)
4293 u = util.url(path)
4294 if u.scheme != b'http':
4294 if u.scheme != b'http':
4295 raise error.Abort(_(b'only http:// paths are currently supported'))
4295 raise error.Abort(_(b'only http:// paths are currently supported'))
4296
4296
4297 url, authinfo = u.authinfo()
4297 url, authinfo = u.authinfo()
4298 openerargs = {
4298 openerargs = {
4299 'useragent': b'Mercurial debugwireproto',
4299 'useragent': b'Mercurial debugwireproto',
4300 }
4300 }
4301
4301
4302 # Turn pipes/sockets into observers so we can log I/O.
4302 # Turn pipes/sockets into observers so we can log I/O.
4303 if ui.verbose:
4303 if ui.verbose:
4304 openerargs.update(
4304 openerargs.update(
4305 {
4305 {
4306 'loggingfh': ui,
4306 'loggingfh': ui,
4307 'loggingname': b's',
4307 'loggingname': b's',
4308 'loggingopts': {'logdata': True, 'logdataapis': False,},
4308 'loggingopts': {'logdata': True, 'logdataapis': False,},
4309 }
4309 }
4310 )
4310 )
4311
4311
4312 if ui.debugflag:
4312 if ui.debugflag:
4313 openerargs['loggingopts']['logdataapis'] = True
4313 openerargs['loggingopts']['logdataapis'] = True
4314
4314
4315 # Don't send default headers when in raw mode. This allows us to
4315 # Don't send default headers when in raw mode. This allows us to
4316 # bypass most of the behavior of our URL handling code so we can
4316 # bypass most of the behavior of our URL handling code so we can
4317 # have near complete control over what's sent on the wire.
4317 # have near complete control over what's sent on the wire.
4318 if opts[b'peer'] == b'raw':
4318 if opts[b'peer'] == b'raw':
4319 openerargs['sendaccept'] = False
4319 openerargs['sendaccept'] = False
4320
4320
4321 opener = urlmod.opener(ui, authinfo, **openerargs)
4321 opener = urlmod.opener(ui, authinfo, **openerargs)
4322
4322
4323 if opts[b'peer'] == b'http2':
4323 if opts[b'peer'] == b'http2':
4324 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4324 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4325 # We go through makepeer() because we need an API descriptor for
4325 # We go through makepeer() because we need an API descriptor for
4326 # the peer instance to be useful.
4326 # the peer instance to be useful.
4327 with ui.configoverride(
4327 with ui.configoverride(
4328 {(b'experimental', b'httppeer.advertise-v2'): True}
4328 {(b'experimental', b'httppeer.advertise-v2'): True}
4329 ):
4329 ):
4330 if opts[b'nologhandshake']:
4330 if opts[b'nologhandshake']:
4331 ui.pushbuffer()
4331 ui.pushbuffer()
4332
4332
4333 peer = httppeer.makepeer(ui, path, opener=opener)
4333 peer = httppeer.makepeer(ui, path, opener=opener)
4334
4334
4335 if opts[b'nologhandshake']:
4335 if opts[b'nologhandshake']:
4336 ui.popbuffer()
4336 ui.popbuffer()
4337
4337
4338 if not isinstance(peer, httppeer.httpv2peer):
4338 if not isinstance(peer, httppeer.httpv2peer):
4339 raise error.Abort(
4339 raise error.Abort(
4340 _(
4340 _(
4341 b'could not instantiate HTTP peer for '
4341 b'could not instantiate HTTP peer for '
4342 b'wire protocol version 2'
4342 b'wire protocol version 2'
4343 ),
4343 ),
4344 hint=_(
4344 hint=_(
4345 b'the server may not have the feature '
4345 b'the server may not have the feature '
4346 b'enabled or is not allowing this '
4346 b'enabled or is not allowing this '
4347 b'client version'
4347 b'client version'
4348 ),
4348 ),
4349 )
4349 )
4350
4350
4351 elif opts[b'peer'] == b'raw':
4351 elif opts[b'peer'] == b'raw':
4352 ui.write(_(b'using raw connection to peer\n'))
4352 ui.write(_(b'using raw connection to peer\n'))
4353 peer = None
4353 peer = None
4354 elif opts[b'peer']:
4354 elif opts[b'peer']:
4355 raise error.Abort(
4355 raise error.Abort(
4356 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4356 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4357 )
4357 )
4358 else:
4358 else:
4359 peer = httppeer.makepeer(ui, path, opener=opener)
4359 peer = httppeer.makepeer(ui, path, opener=opener)
4360
4360
4361 # We /could/ populate stdin/stdout with sock.makefile()...
4361 # We /could/ populate stdin/stdout with sock.makefile()...
4362 else:
4362 else:
4363 raise error.Abort(_(b'unsupported connection configuration'))
4363 raise error.Abort(_(b'unsupported connection configuration'))
4364
4364
4365 batchedcommands = None
4365 batchedcommands = None
4366
4366
4367 # Now perform actions based on the parsed wire language instructions.
4367 # Now perform actions based on the parsed wire language instructions.
4368 for action, lines in blocks:
4368 for action, lines in blocks:
4369 if action in (b'raw', b'raw+'):
4369 if action in (b'raw', b'raw+'):
4370 if not stdin:
4370 if not stdin:
4371 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4371 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4372
4372
4373 # Concatenate the data together.
4373 # Concatenate the data together.
4374 data = b''.join(l.lstrip() for l in lines)
4374 data = b''.join(l.lstrip() for l in lines)
4375 data = stringutil.unescapestr(data)
4375 data = stringutil.unescapestr(data)
4376 stdin.write(data)
4376 stdin.write(data)
4377
4377
4378 if action == b'raw+':
4378 if action == b'raw+':
4379 stdin.flush()
4379 stdin.flush()
4380 elif action == b'flush':
4380 elif action == b'flush':
4381 if not stdin:
4381 if not stdin:
4382 raise error.Abort(_(b'cannot call flush on this peer'))
4382 raise error.Abort(_(b'cannot call flush on this peer'))
4383 stdin.flush()
4383 stdin.flush()
4384 elif action.startswith(b'command'):
4384 elif action.startswith(b'command'):
4385 if not peer:
4385 if not peer:
4386 raise error.Abort(
4386 raise error.Abort(
4387 _(
4387 _(
4388 b'cannot send commands unless peer instance '
4388 b'cannot send commands unless peer instance '
4389 b'is available'
4389 b'is available'
4390 )
4390 )
4391 )
4391 )
4392
4392
4393 command = action.split(b' ', 1)[1]
4393 command = action.split(b' ', 1)[1]
4394
4394
4395 args = {}
4395 args = {}
4396 for line in lines:
4396 for line in lines:
4397 # We need to allow empty values.
4397 # We need to allow empty values.
4398 fields = line.lstrip().split(b' ', 1)
4398 fields = line.lstrip().split(b' ', 1)
4399 if len(fields) == 1:
4399 if len(fields) == 1:
4400 key = fields[0]
4400 key = fields[0]
4401 value = b''
4401 value = b''
4402 else:
4402 else:
4403 key, value = fields
4403 key, value = fields
4404
4404
4405 if value.startswith(b'eval:'):
4405 if value.startswith(b'eval:'):
4406 value = stringutil.evalpythonliteral(value[5:])
4406 value = stringutil.evalpythonliteral(value[5:])
4407 else:
4407 else:
4408 value = stringutil.unescapestr(value)
4408 value = stringutil.unescapestr(value)
4409
4409
4410 args[key] = value
4410 args[key] = value
4411
4411
4412 if batchedcommands is not None:
4412 if batchedcommands is not None:
4413 batchedcommands.append((command, args))
4413 batchedcommands.append((command, args))
4414 continue
4414 continue
4415
4415
4416 ui.status(_(b'sending %s command\n') % command)
4416 ui.status(_(b'sending %s command\n') % command)
4417
4417
4418 if b'PUSHFILE' in args:
4418 if b'PUSHFILE' in args:
4419 with open(args[b'PUSHFILE'], 'rb') as fh:
4419 with open(args[b'PUSHFILE'], 'rb') as fh:
4420 del args[b'PUSHFILE']
4420 del args[b'PUSHFILE']
4421 res, output = peer._callpush(
4421 res, output = peer._callpush(
4422 command, fh, **pycompat.strkwargs(args)
4422 command, fh, **pycompat.strkwargs(args)
4423 )
4423 )
4424 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4424 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4425 ui.status(
4425 ui.status(
4426 _(b'remote output: %s\n') % stringutil.escapestr(output)
4426 _(b'remote output: %s\n') % stringutil.escapestr(output)
4427 )
4427 )
4428 else:
4428 else:
4429 with peer.commandexecutor() as e:
4429 with peer.commandexecutor() as e:
4430 res = e.callcommand(command, args).result()
4430 res = e.callcommand(command, args).result()
4431
4431
4432 if isinstance(res, wireprotov2peer.commandresponse):
4432 if isinstance(res, wireprotov2peer.commandresponse):
4433 val = res.objects()
4433 val = res.objects()
4434 ui.status(
4434 ui.status(
4435 _(b'response: %s\n')
4435 _(b'response: %s\n')
4436 % stringutil.pprint(val, bprefix=True, indent=2)
4436 % stringutil.pprint(val, bprefix=True, indent=2)
4437 )
4437 )
4438 else:
4438 else:
4439 ui.status(
4439 ui.status(
4440 _(b'response: %s\n')
4440 _(b'response: %s\n')
4441 % stringutil.pprint(res, bprefix=True, indent=2)
4441 % stringutil.pprint(res, bprefix=True, indent=2)
4442 )
4442 )
4443
4443
4444 elif action == b'batchbegin':
4444 elif action == b'batchbegin':
4445 if batchedcommands is not None:
4445 if batchedcommands is not None:
4446 raise error.Abort(_(b'nested batchbegin not allowed'))
4446 raise error.Abort(_(b'nested batchbegin not allowed'))
4447
4447
4448 batchedcommands = []
4448 batchedcommands = []
4449 elif action == b'batchsubmit':
4449 elif action == b'batchsubmit':
4450 # There is a batching API we could go through. But it would be
4450 # There is a batching API we could go through. But it would be
4451 # difficult to normalize requests into function calls. It is easier
4451 # difficult to normalize requests into function calls. It is easier
4452 # to bypass this layer and normalize to commands + args.
4452 # to bypass this layer and normalize to commands + args.
4453 ui.status(
4453 ui.status(
4454 _(b'sending batch with %d sub-commands\n')
4454 _(b'sending batch with %d sub-commands\n')
4455 % len(batchedcommands)
4455 % len(batchedcommands)
4456 )
4456 )
4457 assert peer is not None
4457 assert peer is not None
4458 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4458 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4459 ui.status(
4459 ui.status(
4460 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4460 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4461 )
4461 )
4462
4462
4463 batchedcommands = None
4463 batchedcommands = None
4464
4464
4465 elif action.startswith(b'httprequest '):
4465 elif action.startswith(b'httprequest '):
4466 if not opener:
4466 if not opener:
4467 raise error.Abort(
4467 raise error.Abort(
4468 _(b'cannot use httprequest without an HTTP peer')
4468 _(b'cannot use httprequest without an HTTP peer')
4469 )
4469 )
4470
4470
4471 request = action.split(b' ', 2)
4471 request = action.split(b' ', 2)
4472 if len(request) != 3:
4472 if len(request) != 3:
4473 raise error.Abort(
4473 raise error.Abort(
4474 _(
4474 _(
4475 b'invalid httprequest: expected format is '
4475 b'invalid httprequest: expected format is '
4476 b'"httprequest <method> <path>'
4476 b'"httprequest <method> <path>'
4477 )
4477 )
4478 )
4478 )
4479
4479
4480 method, httppath = request[1:]
4480 method, httppath = request[1:]
4481 headers = {}
4481 headers = {}
4482 body = None
4482 body = None
4483 frames = []
4483 frames = []
4484 for line in lines:
4484 for line in lines:
4485 line = line.lstrip()
4485 line = line.lstrip()
4486 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4486 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4487 if m:
4487 if m:
4488 # Headers need to use native strings.
4488 # Headers need to use native strings.
4489 key = pycompat.strurl(m.group(1))
4489 key = pycompat.strurl(m.group(1))
4490 value = pycompat.strurl(m.group(2))
4490 value = pycompat.strurl(m.group(2))
4491 headers[key] = value
4491 headers[key] = value
4492 continue
4492 continue
4493
4493
4494 if line.startswith(b'BODYFILE '):
4494 if line.startswith(b'BODYFILE '):
4495 with open(line.split(b' ', 1), b'rb') as fh:
4495 with open(line.split(b' ', 1), b'rb') as fh:
4496 body = fh.read()
4496 body = fh.read()
4497 elif line.startswith(b'frame '):
4497 elif line.startswith(b'frame '):
4498 frame = wireprotoframing.makeframefromhumanstring(
4498 frame = wireprotoframing.makeframefromhumanstring(
4499 line[len(b'frame ') :]
4499 line[len(b'frame ') :]
4500 )
4500 )
4501
4501
4502 frames.append(frame)
4502 frames.append(frame)
4503 else:
4503 else:
4504 raise error.Abort(
4504 raise error.Abort(
4505 _(b'unknown argument to httprequest: %s') % line
4505 _(b'unknown argument to httprequest: %s') % line
4506 )
4506 )
4507
4507
4508 url = path + httppath
4508 url = path + httppath
4509
4509
4510 if frames:
4510 if frames:
4511 body = b''.join(bytes(f) for f in frames)
4511 body = b''.join(bytes(f) for f in frames)
4512
4512
4513 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4513 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4514
4514
4515 # urllib.Request insists on using has_data() as a proxy for
4515 # urllib.Request insists on using has_data() as a proxy for
4516 # determining the request method. Override that to use our
4516 # determining the request method. Override that to use our
4517 # explicitly requested method.
4517 # explicitly requested method.
4518 req.get_method = lambda: pycompat.sysstr(method)
4518 req.get_method = lambda: pycompat.sysstr(method)
4519
4519
4520 try:
4520 try:
4521 res = opener.open(req)
4521 res = opener.open(req)
4522 body = res.read()
4522 body = res.read()
4523 except util.urlerr.urlerror as e:
4523 except util.urlerr.urlerror as e:
4524 # read() method must be called, but only exists in Python 2
4524 # read() method must be called, but only exists in Python 2
4525 getattr(e, 'read', lambda: None)()
4525 getattr(e, 'read', lambda: None)()
4526 continue
4526 continue
4527
4527
4528 ct = res.headers.get('Content-Type')
4528 ct = res.headers.get('Content-Type')
4529 if ct == 'application/mercurial-cbor':
4529 if ct == 'application/mercurial-cbor':
4530 ui.write(
4530 ui.write(
4531 _(b'cbor> %s\n')
4531 _(b'cbor> %s\n')
4532 % stringutil.pprint(
4532 % stringutil.pprint(
4533 cborutil.decodeall(body), bprefix=True, indent=2
4533 cborutil.decodeall(body), bprefix=True, indent=2
4534 )
4534 )
4535 )
4535 )
4536
4536
4537 elif action == b'close':
4537 elif action == b'close':
4538 assert peer is not None
4538 assert peer is not None
4539 peer.close()
4539 peer.close()
4540 elif action == b'readavailable':
4540 elif action == b'readavailable':
4541 if not stdout or not stderr:
4541 if not stdout or not stderr:
4542 raise error.Abort(
4542 raise error.Abort(
4543 _(b'readavailable not available on this peer')
4543 _(b'readavailable not available on this peer')
4544 )
4544 )
4545
4545
4546 stdin.close()
4546 stdin.close()
4547 stdout.read()
4547 stdout.read()
4548 stderr.read()
4548 stderr.read()
4549
4549
4550 elif action == b'readline':
4550 elif action == b'readline':
4551 if not stdout:
4551 if not stdout:
4552 raise error.Abort(_(b'readline not available on this peer'))
4552 raise error.Abort(_(b'readline not available on this peer'))
4553 stdout.readline()
4553 stdout.readline()
4554 elif action == b'ereadline':
4554 elif action == b'ereadline':
4555 if not stderr:
4555 if not stderr:
4556 raise error.Abort(_(b'ereadline not available on this peer'))
4556 raise error.Abort(_(b'ereadline not available on this peer'))
4557 stderr.readline()
4557 stderr.readline()
4558 elif action.startswith(b'read '):
4558 elif action.startswith(b'read '):
4559 count = int(action.split(b' ', 1)[1])
4559 count = int(action.split(b' ', 1)[1])
4560 if not stdout:
4560 if not stdout:
4561 raise error.Abort(_(b'read not available on this peer'))
4561 raise error.Abort(_(b'read not available on this peer'))
4562 stdout.read(count)
4562 stdout.read(count)
4563 elif action.startswith(b'eread '):
4563 elif action.startswith(b'eread '):
4564 count = int(action.split(b' ', 1)[1])
4564 count = int(action.split(b' ', 1)[1])
4565 if not stderr:
4565 if not stderr:
4566 raise error.Abort(_(b'eread not available on this peer'))
4566 raise error.Abort(_(b'eread not available on this peer'))
4567 stderr.read(count)
4567 stderr.read(count)
4568 else:
4568 else:
4569 raise error.Abort(_(b'unknown action: %s') % action)
4569 raise error.Abort(_(b'unknown action: %s') % action)
4570
4570
4571 if batchedcommands is not None:
4571 if batchedcommands is not None:
4572 raise error.Abort(_(b'unclosed "batchbegin" request'))
4572 raise error.Abort(_(b'unclosed "batchbegin" request'))
4573
4573
4574 if peer:
4574 if peer:
4575 peer.close()
4575 peer.close()
4576
4576
4577 if proc:
4577 if proc:
4578 proc.kill()
4578 proc.kill()
@@ -1,1337 +1,1337
1 # dispatch.py - command dispatching for mercurial
1 # dispatch.py - command dispatching for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import, print_function
8 from __future__ import absolute_import, print_function
9
9
10 import difflib
10 import difflib
11 import errno
11 import errno
12 import getopt
12 import getopt
13 import io
13 import io
14 import os
14 import os
15 import pdb
15 import pdb
16 import re
16 import re
17 import signal
17 import signal
18 import sys
18 import sys
19 import traceback
19 import traceback
20
20
21
21
22 from .i18n import _
22 from .i18n import _
23 from .pycompat import getattr
23 from .pycompat import getattr
24
24
25 from hgdemandimport import tracing
25 from hgdemandimport import tracing
26
26
27 from . import (
27 from . import (
28 cmdutil,
28 cmdutil,
29 color,
29 color,
30 commands,
30 commands,
31 demandimport,
31 demandimport,
32 encoding,
32 encoding,
33 error,
33 error,
34 extensions,
34 extensions,
35 fancyopts,
35 fancyopts,
36 help,
36 help,
37 hg,
37 hg,
38 hook,
38 hook,
39 profiling,
39 profiling,
40 pycompat,
40 pycompat,
41 rcutil,
41 rcutil,
42 registrar,
42 registrar,
43 scmutil,
43 scmutil,
44 ui as uimod,
44 ui as uimod,
45 util,
45 util,
46 )
46 )
47
47
48 from .utils import (
48 from .utils import (
49 procutil,
49 procutil,
50 stringutil,
50 stringutil,
51 )
51 )
52
52
53
53
class request(object):
    """Everything dispatch() needs in order to run one command.

    Bundles the raw argument list with optional pre-created ui/repo
    objects and optional replacement stdio streams.
    """

    def __init__(
        self,
        args,
        ui=None,
        repo=None,
        fin=None,
        fout=None,
        ferr=None,
        fmsg=None,
        prereposetups=None,
    ):
        self.args = args
        self.ui = ui
        self.repo = repo

        # input/output/error streams
        self.fin = fin
        self.fout = fout
        self.ferr = ferr
        # separate stream for status/error messages
        self.fmsg = fmsg

        # remember options pre-parsed by _earlyparseopts()
        self.earlyoptions = {}

        # reposetups which run before extensions, useful for chg to pre-fill
        # low-level repo state (for example, changelog) before extensions.
        self.prereposetups = prereposetups or []

        # store the parsed and canonical command
        self.canonical_command = None

    def _runexithandlers(self):
        """Pop and invoke every registered exit handler.

        Handlers run in LIFO order. A failing handler does not stop the
        others; the first exception seen is remembered and re-raised once
        the queue is empty.
        """
        first_exc = None
        pending = self.ui._exithandlers
        try:
            while pending:
                fn, fn_args, fn_kwargs = pending.pop()
                try:
                    fn(*fn_args, **fn_kwargs)
                except:  # re-raises below
                    if first_exc is None:
                        first_exc = sys.exc_info()[1]
                    self.ui.warnnoi18n(b'error in exit handlers:\n')
                    self.ui.traceback(force=True)
        finally:
            if first_exc is not None:
                raise first_exc
103
103
104
104
def run():
    """run the command in sys.argv

    Outermost entry point: normalize the stdio streams, build a
    ``request`` from the command line, dispatch it, flush output, and
    exit the process with the low byte of the status code.
    """
    try:
        initstdio()
        with tracing.log('parse args into request'):
            req = request(pycompat.sysargv[1:])
        err = None
        try:
            status = dispatch(req)
        except error.StdioError as e:
            err = e
            status = -1

        # In all cases we try to flush stdio streams.
        #
        # NOTE: attribute names passed to util.safehasattr() must be
        # native str. Bytes names (b'fout') only worked through the
        # pycompat getattr() shim; plain getattr() rejects bytes on
        # Python 3.
        if util.safehasattr(req.ui, 'fout'):
            assert req.ui is not None  # help pytype
            assert req.ui.fout is not None  # help pytype
            try:
                req.ui.fout.flush()
            except IOError as e:
                err = e
                status = -1

        if util.safehasattr(req.ui, 'ferr'):
            assert req.ui is not None  # help pytype
            assert req.ui.ferr is not None  # help pytype
            try:
                if err is not None and err.errno != errno.EPIPE:
                    req.ui.ferr.write(
                        b'abort: %s\n' % encoding.strtolocal(err.strerror)
                    )
                req.ui.ferr.flush()
            # There's not much we can do about an I/O error here. So (possibly)
            # change the status code and move on.
            except IOError:
                status = -1

        _silencestdio()
    except KeyboardInterrupt:
        # Catch early/late KeyboardInterrupt as last ditch. Here nothing will
        # be printed to console to avoid another IOError/KeyboardInterrupt.
        status = -1
    sys.exit(status & 255)
148
148
149
149
if pycompat.ispy3:

    def initstdio():
        """Disable newline translation on the sys.* text wrappers (Python 3).

        The io.TextIOWrapper objects Python installs on sys.stdout/stderr
        normalize \\n to the platform line ending. Mercurial's preferred
        output path (ui.write()) writes \\n to the underlying binary
        buffers with no translation, so code that still goes through
        sys.stdout/sys.stderr (e.g. hooks) could otherwise emit \\r\\n.
        Rebuild the wrappers with newline="\\n" so both paths agree.
        """
        # write_through is new in Python 3.7.
        out_opts = {
            "newline": "\n",
            "line_buffering": sys.stdout.line_buffering,
        }
        if util.safehasattr(sys.stdout, "write_through"):
            out_opts["write_through"] = sys.stdout.write_through
        sys.stdout = io.TextIOWrapper(
            sys.stdout.buffer,
            sys.stdout.encoding,
            sys.stdout.errors,
            **out_opts
        )

        err_opts = {
            "newline": "\n",
            "line_buffering": sys.stderr.line_buffering,
        }
        if util.safehasattr(sys.stderr, "write_through"):
            err_opts["write_through"] = sys.stderr.write_through
        sys.stderr = io.TextIOWrapper(
            sys.stderr.buffer,
            sys.stderr.encoding,
            sys.stderr.errors,
            **err_opts
        )

        # No write_through on read-only stream. newline=None selects
        # universal newlines mode for input.
        sys.stdin = io.TextIOWrapper(
            sys.stdin.buffer,
            sys.stdin.encoding,
            sys.stdin.errors,
            newline=None,
            line_buffering=sys.stdin.line_buffering,
        )

    def _silencestdio():
        """Best-effort flush of stdout/stderr; mark broken streams closed."""
        for stream in (sys.stdout, sys.stderr):
            # Check if the file is okay
            try:
                stream.flush()
            except IOError:
                pass
            else:
                continue
            # The stream is broken: mark it as closed to silence the
            # "Exception ignored in" message emitted by the interpreter
            # finalizer. We deliberately touch only the sys.* wrappers and
            # not procutil.stdout, which may be a fdopen-ed file object
            # whose close() actually closes the underlying file descriptor.
            try:
                stream.close()
            except IOError:
                pass


else:

    def initstdio():
        """Force binary mode on the stdio streams (Python 2)."""
        for stream in (sys.stdin, sys.stdout, sys.stderr):
            procutil.setbinary(stream)

    def _silencestdio():
        """No-op on Python 2."""
        pass
224
224
225
225
226 def _getsimilar(symbols, value):
226 def _getsimilar(symbols, value):
227 sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
227 sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
228 # The cutoff for similarity here is pretty arbitrary. It should
228 # The cutoff for similarity here is pretty arbitrary. It should
229 # probably be investigated and tweaked.
229 # probably be investigated and tweaked.
230 return [s for s in symbols if sim(s) > 0.6]
230 return [s for s in symbols if sim(s) > 0.6]
231
231
232
232
def _reportsimilar(write, similar):
    """Emit a "did you mean ...?" hint for the given suggestions, if any."""
    if not similar:
        return
    if len(similar) == 1:
        write(_(b"(did you mean %s?)\n") % similar[0])
    else:
        ss = b", ".join(sorted(similar))
        write(_(b"(did you mean one of %s?)\n") % ss)
239
239
240
240
def _formatparse(write, inst):
    """Write a user-facing description of ParseError *inst* via *write*."""
    suggestions = []
    if isinstance(inst, error.UnknownIdentifier):
        # make sure to check fileset first, as revset can invoke fileset
        suggestions = _getsimilar(inst.symbols, inst.function)
    if len(inst.args) > 1:
        # A second argument carries the error location.
        location = pycompat.bytestr(inst.args[1])
        write(_(b"hg: parse error at %s: %s\n") % (location, inst.args[0]))
        if inst.args[0].startswith(b' '):
            write(_(b"unexpected leading whitespace\n"))
    else:
        write(_(b"hg: parse error: %s\n") % inst.args[0])
        _reportsimilar(write, suggestions)
    if inst.hint:
        write(_(b"(%s)\n") % inst.hint)
258
258
259
259
def _formatargs(args):
    """Return *args* joined into one shell-quoted byte string."""
    return b' '.join(map(procutil.shellquote, args))
262
262
263
263
def dispatch(req):
    """run the command specified in req.args; returns an integer status code"""
    with tracing.log('dispatch.dispatch'):
        # Pick the best channel available for early error reporting,
        # before the ui is fully set up.
        if req.ferr:
            errstream = req.ferr
        elif req.ui:
            errstream = req.ui.ferr
        else:
            errstream = procutil.stderr

        try:
            if not req.ui:
                req.ui = uimod.ui.load()
            req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
            if req.earlyoptions[b'traceback']:
                req.ui.setconfig(b'ui', b'traceback', b'on', b'--traceback')

            # set ui streams from the request
            if req.fin:
                req.ui.fin = req.fin
            if req.fout:
                req.ui.fout = req.fout
            if req.ferr:
                req.ui.ferr = req.ferr
            if req.fmsg:
                req.ui.fmsg = req.fmsg
        except error.Abort as inst:
            errstream.write(_(b"abort: %s\n") % inst.message)
            if inst.hint:
                errstream.write(_(b"(%s)\n") % inst.hint)
            return -1
        except error.ParseError as inst:
            _formatparse(errstream.write, inst)
            return -1

        # cmdmsg is what the "commandfinish" log line reports; it starts
        # as the quoted command line and is replaced by an interrupt
        # message below if the run is interrupted.
        cmdmsg = _formatargs(req.args)
        t_start = util.timer()
        status = 1  # default of Python exit code on unhandled exception
        try:
            status = _runcatch(req) or 0
        except error.ProgrammingError as inst:
            req.ui.error(_(b'** ProgrammingError: %s\n') % inst)
            if inst.hint:
                req.ui.error(_(b'** (%s)\n') % inst.hint)
            raise
        except KeyboardInterrupt as inst:
            try:
                if isinstance(inst, error.SignalInterrupt):
                    cmdmsg = _(b"killed!\n")
                else:
                    cmdmsg = _(b"interrupted!\n")
                req.ui.error(cmdmsg)
            except error.SignalInterrupt:
                # maybe pager would quit without consuming all the output, and
                # SIGPIPE was raised. we cannot print anything in this case.
                pass
        except IOError as inst:
            if inst.errno != errno.EPIPE:
                raise
            status = -1
        finally:
            elapsed = util.timer() - t_start
            req.ui.flush()  # record blocked times
            if req.ui.logblockedtimes:
                req.ui._blockedtimes[b'command_duration'] = elapsed * 1000
                req.ui.log(
                    b'uiblocked',
                    b'ui blocked ms\n',
                    **pycompat.strkwargs(req.ui._blockedtimes)
                )
            return_code = status & 255
            req.ui.log(
                b"commandfinish",
                b"%s exited %d after %0.2f seconds\n",
                cmdmsg,
                return_code,
                elapsed,
                return_code=return_code,
                duration=elapsed,
                canonical_command=req.canonical_command,
            )
            try:
                req._runexithandlers()
            except:  # exiting, so no re-raises
                status = status or -1
            # do flush again since ui.log() and exit handlers may write to ui
            req.ui.flush()
        return status
352
352
353
353
def _runcatch(req):
    """Run the request inside the top-level exception-catching machinery.

    Installs termination-signal handlers, rejects unsafe
    ``hg serve --stdio`` invocations, wires up the (optionally custom)
    debugger, and finally dispatches the command via ``_callcatch``.
    Returns the command's integer exit status.
    """
    with tracing.log('dispatch._runcatch'):

        def catchterm(*args):
            # Translate termination signals into a catchable exception so
            # cleanup code (exit handlers, ui flush) still runs.
            raise error.SignalInterrupt

        ui = req.ui
        try:
            for name in b'SIGBREAK', b'SIGHUP', b'SIGTERM':
                # not every signal exists on every platform (e.g. SIGBREAK
                # is Windows-only), hence the getattr with a None default
                num = getattr(signal, name, None)
                if num:
                    signal.signal(num, catchterm)
        except ValueError:
            pass  # happens if called in a thread

        def _runcatchfunc():
            realcmd = None
            try:
                cmdargs = fancyopts.fancyopts(
                    req.args[:], commands.globalopts, {}
                )
                cmd = cmdargs[0]
                aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
                realcmd = aliases[0]
            except (
                error.UnknownCommand,
                error.AmbiguousCommand,
                IndexError,
                getopt.GetoptError,
            ):
                # Don't handle this here. We know the command is
                # invalid, but all we're worried about for now is that
                # it's not a command that server operators expect to
                # be safe to offer to users in a sandbox.
                pass
            if realcmd == b'serve' and b'--stdio' in cmdargs:
                # We want to constrain 'hg serve --stdio' instances pretty
                # closely, as many shared-ssh access tools want to grant
                # access to run *only* 'hg -R $repo serve --stdio'. We
                # restrict to exactly that set of arguments, and prohibit
                # any repo name that starts with '--' to prevent
                # shenanigans wherein a user does something like pass
                # --debugger or --config=ui.debugger=1 as a repo
                # name. This used to actually run the debugger.
                if (
                    len(req.args) != 4
                    or req.args[0] != b'-R'
                    or req.args[1].startswith(b'--')
                    or req.args[2] != b'serve'
                    or req.args[3] != b'--stdio'
                ):
                    raise error.Abort(
                        _(b'potentially unsafe serve --stdio invocation: %s')
                        % (stringutil.pprint(req.args),)
                    )

            try:
                debugger = b'pdb'
                debugtrace = {b'pdb': pdb.set_trace}
                debugmortem = {b'pdb': pdb.post_mortem}

                # read --config before doing anything else
                # (e.g. to change trust settings for reading .hg/hgrc)
                cfgs = _parseconfig(req.ui, req.earlyoptions[b'config'])

                if req.repo:
                    # copy configs that were passed on the cmdline (--config) to
                    # the repo ui
                    for sec, name, val in cfgs:
                        req.repo.ui.setconfig(
                            sec, name, val, source=b'--config'
                        )

                # developer config: ui.debugger
                debugger = ui.config(b"ui", b"debugger")
                debugmod = pdb
                if not debugger or ui.plain():
                    # if we are in HGPLAIN mode, then disable custom debugging
                    debugger = b'pdb'
                elif req.earlyoptions[b'debugger']:
                    # This import can be slow for fancy debuggers, so only
                    # do it when absolutely necessary, i.e. when actual
                    # debugging has been requested
                    with demandimport.deactivated():
                        try:
                            debugmod = __import__(debugger)
                        except ImportError:
                            pass  # Leave debugmod = pdb

                debugtrace[debugger] = debugmod.set_trace
                debugmortem[debugger] = debugmod.post_mortem

                # enter the debugger before command execution
                if req.earlyoptions[b'debugger']:
                    ui.warn(
                        _(
                            b"entering debugger - "
                            b"type c to continue starting hg or h for help\n"
                        )
                    )

                    # if the custom debugger's import failed above, its
                    # trace entry still points at pdb's — warn the user
                    if (
                        debugger != b'pdb'
                        and debugtrace[debugger] == debugtrace[b'pdb']
                    ):
                        ui.warn(
                            _(
                                b"%s debugger specified "
                                b"but its module was not found\n"
                            )
                            % debugger
                        )
                    with demandimport.deactivated():
                        debugtrace[debugger]()
                try:
                    return _dispatch(req)
                finally:
                    ui.flush()
            except:  # re-raises
                # enter the debugger when we hit an exception
                if req.earlyoptions[b'debugger']:
                    traceback.print_exc()
                    debugmortem[debugger](sys.exc_info()[2])
                raise

        return _callcatch(ui, _runcatchfunc)
480
480
481
481
def _callcatch(ui, func):
    """like scmutil.callcatch but handles more high-level exceptions about
    config parsing and commands. besides, use handlecommandexception to handle
    uncaught exceptions.

    Returns ``func()``'s result on success, or -1 after reporting a
    command-level error (ambiguous/unknown command, bad arguments, parse
    errors).  IOError and KeyboardInterrupt are always re-raised.
    """
    try:
        return scmutil.callcatch(ui, func)
    except error.AmbiguousCommand as inst:
        ui.warn(
            _(b"hg: command '%s' is ambiguous:\n    %s\n")
            % (inst.prefix, b" ".join(inst.matches))
        )
    except error.CommandError as inst:
        if inst.command:
            # error for a specific command: show its usage via help
            ui.pager(b'help')
            msgbytes = pycompat.bytestr(inst.message)
            ui.warn(_(b"hg %s: %s\n") % (inst.command, msgbytes))
            commands.help_(ui, inst.command, full=False, command=True)
        else:
            ui.warn(_(b"hg: %s\n") % inst.message)
            ui.warn(_(b"(use 'hg help -v' for a list of global options)\n"))
    except error.ParseError as inst:
        _formatparse(ui.warn, inst)
        return -1
    except error.UnknownCommand as inst:
        nocmdmsg = _(b"hg: unknown command '%s'\n") % inst.command
        try:
            # check if the command is in a disabled extension
            # (but don't check for extensions themselves)
            formatted = help.formattedhelp(
                ui, commands, inst.command, unknowncmd=True
            )
            ui.warn(nocmdmsg)
            ui.write(formatted)
        except (error.UnknownCommand, error.Abort):
            # not in a disabled extension either; suggest similar names
            suggested = False
            if inst.all_commands:
                sim = _getsimilar(inst.all_commands, inst.command)
                if sim:
                    ui.warn(nocmdmsg)
                    _reportsimilar(ui.warn, sim)
                    suggested = True
            if not suggested:
                ui.warn(nocmdmsg)
                ui.warn(_(b"(use 'hg help' for a list of commands)\n"))
    except IOError:
        raise
    except KeyboardInterrupt:
        raise
    except:  # probably re-raises
        if not handlecommandexception(ui):
            raise

    return -1
536
536
537
537
def aliasargs(fn, givenargs):
    """Combine an alias function's baked-in args with user-given args.

    ``$1``..``$n`` placeholders in the alias's stored arguments are
    replaced by the corresponding entries of ``givenargs``; consumed
    entries are removed from ``givenargs`` and the remainder is appended.
    Returns the resulting argument list.
    """
    args = []
    # only care about alias 'args', ignore 'args' set by extensions.wrapfunction
    if not util.safehasattr(fn, b'_origfunc'):
        args = getattr(fn, 'args', args)
    if args:
        cmd = b' '.join(map(procutil.shellquote, args))

        # indices of givenargs consumed by $N substitution
        nums = []

        def replacer(m):
            num = int(m.group(1)) - 1
            nums.append(num)
            if num < len(givenargs):
                return givenargs[num]
            raise error.Abort(_(b'too few arguments for command alias'))

        cmd = re.sub(br'\$(\d+|\$)', replacer, cmd)
        # drop the positional args that were interpolated above
        givenargs = [x for i, x in enumerate(givenargs) if i not in nums]
        args = pycompat.shlexsplit(cmd)
    return args + givenargs
559
559
560
560
def aliasinterpolate(name, args, cmd):
    '''interpolate args into cmd for shell aliases

    This also handles $0, $@ and "$@".

    ``name`` is the alias name (substituted for ``$0``), ``args`` the
    positional arguments, and ``cmd`` the shell command template.
    Returns the command with all placeholders replaced.
    '''
    # util.interpolate can't deal with "$@" (with quotes) because it's only
    # built to match prefix + patterns.
    replacemap = {b'$%d' % (i + 1): arg for i, arg in enumerate(args)}
    replacemap[b'$0'] = name
    replacemap[b'$$'] = b'$'
    replacemap[b'$@'] = b' '.join(args)
    # Typical Unix shells interpolate "$@" (with quotes) as all the positional
    # parameters, separated out into words. Emulate the same behavior here by
    # quoting the arguments individually. POSIX shells will then typically
    # tokenize each argument into exactly one word.
    replacemap[b'"$@"'] = b' '.join(procutil.shellquote(arg) for arg in args)
    # escape '\$' for regex
    regex = b'|'.join(replacemap.keys()).replace(b'$', br'\$')
    r = re.compile(regex)
    return r.sub(lambda x: replacemap[x.group()], cmd)
581
581
582
582
class cmdalias(object):
    """A callable command-table entry built from an ``[alias]`` definition.

    Resolves the alias definition at construction time: shell aliases
    (definitions starting with ``!``) get a wrapper that runs through
    ``ui.system``; command aliases are resolved against ``cmdtable``.
    Resolution problems are recorded in ``self.badalias`` rather than
    raised, so the error surfaces only when the alias is invoked.
    """

    def __init__(self, ui, name, definition, cmdtable, source):
        self.name = self.cmd = name
        self.cmdname = b''
        self.definition = definition
        self.fn = None
        self.givenargs = []
        self.opts = []
        self.help = b''
        self.badalias = None
        self.unknowncmd = False
        self.source = source

        try:
            aliases, entry = cmdutil.findcmd(self.name, cmdtable)
            # find the canonical name of the command this alias shadows
            for alias, e in pycompat.iteritems(cmdtable):
                if e is entry:
                    self.cmd = alias
                    break
            self.shadows = True
        except error.UnknownCommand:
            self.shadows = False

        if not self.definition:
            self.badalias = _(b"no definition for alias '%s'") % self.name
            return

        if self.definition.startswith(b'!'):
            # shell alias: everything after '!' is run via ui.system()
            shdef = self.definition[1:]
            self.shell = True

            def fn(ui, *args):
                env = {b'HG_ARGS': b' '.join((self.name,) + args)}

                def _checkvar(m):
                    # leave $$ and in-range $N for aliasinterpolate();
                    # out-of-range $N are dropped with a debug note
                    if m.groups()[0] == b'$':
                        return m.group()
                    elif int(m.groups()[0]) <= len(args):
                        return m.group()
                    else:
                        ui.debug(
                            b"No argument found for substitution "
                            b"of %i variable in alias '%s' definition.\n"
                            % (int(m.groups()[0]), self.name)
                        )
                        return b''

                cmd = re.sub(br'\$(\d+|\$)', _checkvar, shdef)
                cmd = aliasinterpolate(self.name, args, cmd)
                return ui.system(
                    cmd, environ=env, blockedtag=b'alias_%s' % self.name
                )

            self.fn = fn
            self.alias = True
            self._populatehelp(ui, name, shdef, self.fn)
            return

        try:
            args = pycompat.shlexsplit(self.definition)
        except ValueError as inst:
            self.badalias = _(b"error in definition for alias '%s': %s") % (
                self.name,
                stringutil.forcebytestr(inst),
            )
            return
        earlyopts, args = _earlysplitopts(args)
        if earlyopts:
            # early options (-R, --cwd, --config, ...) are parsed before
            # aliases are expanded, so they cannot appear in a definition
            self.badalias = _(
                b"error in definition for alias '%s': %s may "
                b"only be given on the command line"
            ) % (self.name, b'/'.join(pycompat.ziplist(*earlyopts)[0]))
            return
        self.cmdname = cmd = args.pop(0)
        self.givenargs = args

        try:
            tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
            if len(tableentry) > 2:
                self.fn, self.opts, cmdhelp = tableentry
            else:
                self.fn, self.opts = tableentry
                cmdhelp = None

            self.alias = True
            self._populatehelp(ui, name, cmd, self.fn, cmdhelp)

        except error.UnknownCommand:
            self.badalias = _(
                b"alias '%s' resolves to unknown command '%s'"
            ) % (self.name, cmd,)
            self.unknowncmd = True
        except error.AmbiguousCommand:
            self.badalias = _(
                b"alias '%s' resolves to ambiguous command '%s'"
            ) % (self.name, cmd,)

    def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None):
        """Fill in help/doc/category, honoring [alias] <name>:doc/help/category
        config overrides."""
        # confine strings to be passed to i18n.gettext()
        cfg = {}
        for k in (b'doc', b'help', b'category'):
            v = ui.config(b'alias', b'%s:%s' % (name, k), None)
            if v is None:
                continue
            if not encoding.isasciistr(v):
                self.badalias = _(
                    b"non-ASCII character in alias definition '%s:%s'"
                ) % (name, k)
                return
            cfg[k] = v

        self.help = cfg.get(b'help', defaulthelp or b'')
        if self.help and self.help.startswith(b"hg " + cmd):
            # drop prefix in old-style help lines so hg shows the alias
            self.help = self.help[4 + len(cmd) :]

        self.owndoc = b'doc' in cfg
        doc = cfg.get(b'doc', pycompat.getdoc(fn))
        if doc is not None:
            doc = pycompat.sysstr(doc)
        self.__doc__ = doc

        self.helpcategory = cfg.get(
            b'category', registrar.command.CATEGORY_NONE
        )

    @property
    def args(self):
        # expand user/env in the stored args, then interpolate $N placeholders
        args = pycompat.maplist(util.expandpath, self.givenargs)
        return aliasargs(self.fn, args)

    def __getattr__(self, name):
        # default command attributes for bad/shell aliases; otherwise
        # delegate to the wrapped command function
        adefaults = {
            'norepo': True,
            'intents': set(),
            'optionalrepo': False,
            'inferrepo': False,
        }
        if name not in adefaults:
            raise AttributeError(name)
        if self.badalias or util.safehasattr(self, b'shell'):
            return adefaults[name]
        return getattr(self.fn, name)

    def __call__(self, ui, *args, **opts):
        if self.badalias:
            hint = None
            if self.unknowncmd:
                try:
                    # check if the command is in a disabled extension
                    cmd, ext = extensions.disabledcmd(ui, self.cmdname)[:2]
                    hint = _(b"'%s' is provided by '%s' extension") % (cmd, ext)
                except error.UnknownCommand:
                    pass
            raise error.Abort(self.badalias, hint=hint)
        if self.shadows:
            ui.debug(
                b"alias '%s' shadows command '%s'\n" % (self.name, self.cmdname)
            )

        ui.log(
            b'commandalias',
            b"alias '%s' expands to '%s'\n",
            self.name,
            self.definition,
        )
        if util.safehasattr(self, b'shell'):
            return self.fn(ui, *args, **opts)
        else:
            try:
                return util.checksignature(self.fn)(ui, *args, **opts)
            except error.SignatureError:
                args = b' '.join([self.cmdname] + self.args)
                ui.debug(b"alias '%s' expands to '%s'\n" % (self.name, args))
                raise
758
758
759
759
class lazyaliasentry(object):
    """like a typical command entry (func, opts, help), but is lazy"""

    def __init__(self, ui, name, definition, cmdtable, source):
        self.ui = ui
        self.name = name
        self.definition = definition
        self.cmdtable = cmdtable.copy()
        self.source = source
        self.alias = True

    @util.propertycache
    def _aliasdef(self):
        # resolving the alias is deferred until first use, so merely
        # registering aliases stays cheap
        return cmdalias(
            self.ui, self.name, self.definition, self.cmdtable, self.source
        )

    def __getitem__(self, n):
        # present the (func, opts, help) triple of a regular table entry
        aliasdef = self._aliasdef
        if n not in (0, 1, 2):
            raise IndexError
        return (aliasdef, aliasdef.opts, aliasdef.help)[n]

    def __iter__(self):
        yield self[0]
        yield self[1]
        yield self[2]

    def __len__(self):
        return 3
794
794
795
795
def addaliases(ui, cmdtable):
    """Register every ``[alias]`` config entry into ``cmdtable`` lazily."""
    # aliases are processed after extensions have been loaded, so they
    # may use extension commands. Aliases can also use other alias definitions,
    # but only if they have been defined prior to the current definition.
    for alias, definition in ui.configitems(b'alias', ignoresub=True):
        try:
            unchanged = cmdtable[alias].definition == definition
        except (KeyError, AttributeError):
            # definition might not exist or it might not be a cmdalias
            unchanged = False
        if unchanged:
            continue

        source = ui.configsource(b'alias', alias)
        cmdtable[alias] = lazyaliasentry(
            ui, alias, definition, cmdtable, source
        )
811
811
812
812
def _parse(ui, args):
    """Parse global and per-command options from ``args``.

    Returns a 5-tuple ``(cmd, func, args, options, cmdoptions)`` where
    ``cmd`` is the canonical command name (or None if no command was
    given), ``func`` its implementation, ``args`` the remaining
    positional arguments, ``options`` the global option values and
    ``cmdoptions`` the command-specific ones.  Raises
    ``error.CommandError`` on malformed options.
    """
    options = {}
    cmdoptions = {}

    # first pass: global options only, stopping at the command name
    try:
        args = fancyopts.fancyopts(args, commands.globalopts, options)
    except getopt.GetoptError as inst:
        raise error.CommandError(None, stringutil.forcebytestr(inst))

    if args:
        cmd, args = args[0], args[1:]
        aliases, entry = cmdutil.findcmd(
            cmd, commands.table, ui.configbool(b"ui", b"strict")
        )
        cmd = aliases[0]
        args = aliasargs(entry[0], args)
        # [defaults] config entries are prepended to the argument list
        defaults = ui.config(b"defaults", cmd)
        if defaults:
            args = (
                pycompat.maplist(util.expandpath, pycompat.shlexsplit(defaults))
                + args
            )
        c = list(entry[1])
    else:
        cmd = None
        c = []

    # combine global options into local
    for o in commands.globalopts:
        c.append((o[0], o[1], options[o[1]], o[3]))

    # second pass: the command's own options plus globals, GNU-style
    try:
        args = fancyopts.fancyopts(args, c, cmdoptions, gnu=True)
    except getopt.GetoptError as inst:
        raise error.CommandError(cmd, stringutil.forcebytestr(inst))

    # separate global options back out
    for o in commands.globalopts:
        n = o[1]
        options[n] = cmdoptions[n]
        del cmdoptions[n]

    return (cmd, cmd and entry[0] or None, args, options, cmdoptions)
856
856
857
857
def _parseconfig(ui, config):
    """parse the --config options from the command line"""
    configs = []

    for cfg in config:
        # expected shape: section.name=value
        try:
            name, value = [part.strip() for part in cfg.split(b'=', 1)]
            section, name = name.split(b'.', 1)
            if not section or not name:
                raise IndexError
        except (IndexError, ValueError):
            raise error.Abort(
                _(
                    b'malformed --config option: %r '
                    b'(use --config section.name=value)'
                )
                % pycompat.bytestr(cfg)
            )
        ui.setconfig(section, name, value, b'--config')
        configs.append((section, name, value))

    return configs
880
880
881
881
def _earlyparseopts(ui, args):
    """Parse just the global options that must be known before dispatch
    (e.g. --repository, --config) and return them as a dict."""
    parsed = {}
    fancyopts.fancyopts(
        args,
        commands.globalopts,
        parsed,
        gnu=not ui.plain(b'strictflags'),
        early=True,
        optaliases={b'repository': [b'repo']},
    )
    return parsed
893
893
894
894
def _earlysplitopts(args):
    """Split args into a list of possible early options and remainder args"""
    # TODO: perhaps 'debugger' should be included
    short_opts = b'R:'
    long_opts = [b'cwd=', b'repository=', b'repo=', b'config=']
    return fancyopts.earlygetopt(
        args, short_opts, long_opts, gnu=True, keepsep=True
    )
903
903
904
904
def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
    """Run command ``d`` surrounded by its pre-/post-/fail- hooks.

    The ``pre-<cmd>`` hook runs first and may abort the command (it is
    invoked with throw=True).  On success the ``post-<cmd>`` hook runs
    with the command's result; on any exception the ``fail-<cmd>`` hook
    runs and the exception is re-raised.  Returns the command's result.
    """
    # run pre-hook, and abort if it fails
    hook.hook(
        lui,
        repo,
        b"pre-%s" % cmd,
        True,
        args=b" ".join(fullargs),
        pats=cmdpats,
        opts=cmdoptions,
    )
    try:
        ret = _runcommand(ui, options, cmd, d)
        # run post-hook, passing command result
        hook.hook(
            lui,
            repo,
            b"post-%s" % cmd,
            False,
            args=b" ".join(fullargs),
            result=ret,
            pats=cmdpats,
            opts=cmdoptions,
        )
    except Exception:
        # run failure hook and re-raise
        hook.hook(
            lui,
            repo,
            b"fail-%s" % cmd,
            False,
            args=b" ".join(fullargs),
            pats=cmdpats,
            opts=cmdoptions,
        )
        raise
    return ret
942
942
943
943
def _getlocal(ui, rpath, wd=None):
    """Return (path, local ui object) for the given target path.

    Takes paths in [cwd]/.hg/hgrc into account.
    """

    def _localui(repopath):
        # copy the global ui and layer the repository's hgrc on top,
        # unless per-repo config files are disabled
        newui = ui.copy()
        if rcutil.use_repo_hgrc():
            newui.readconfig(os.path.join(repopath, b".hg", b"hgrc"), repopath)
        return newui

    if wd is None:
        try:
            wd = encoding.getcwd()
        except OSError as e:
            raise error.Abort(
                _(b"error getting current working directory: %s")
                % encoding.strtolocal(e.strerror)
            )

    path = cmdutil.findrepo(wd) or b""
    if not path:
        lui = ui
    else:
        lui = _localui(path)

    if rpath:
        # an explicit -R/--repository overrides the repo found from cwd
        path = lui.expandpath(rpath)
        lui = _localui(path)

    return path, lui
973
973
974
974
def _checkshellalias(lui, ui, args):
    """Return the function to run the shell alias, if it is required"""
    globalopts = {}

    try:
        args = fancyopts.fancyopts(args, commands.globalopts, globalopts)
    except getopt.GetoptError:
        return
    if not args:
        return

    name = args[0]
    try:
        strict = ui.configbool(b"ui", b"strict")
        aliases, entry = cmdutil.findcmd(name, commands.table, strict)
    except (error.AmbiguousCommand, error.UnknownCommand):
        return

    name = aliases[0]
    func = entry[0]
    if not (name and util.safehasattr(func, b'shell')):
        # not a shell alias; let the normal dispatch path handle it
        return

    # shell alias shouldn't receive early options which are consumed by hg
    _earlyopts, args = _earlysplitopts(args)
    invoke = lambda: func(ui, *args[1:])
    return lambda: runcommand(
        lui, None, name, args[:1], ui, globalopts, invoke, [], {}
    )
1006
1006
1007
1007
def _dispatch(req):
    """Resolve global options and the target repository for ``req``,
    then parse and run the requested command via runcommand().

    Order matters throughout: early options, extension loading, alias
    resolution, option validation, ui configuration, and repository
    creation each depend on the previous step.
    """
    args = req.args
    ui = req.ui

    # check for cwd
    cwd = req.earlyoptions[b'cwd']
    if cwd:
        os.chdir(cwd)

    rpath = req.earlyoptions[b'repository']
    path, lui = _getlocal(ui, rpath)

    # every ui object that must see the option-derived config tweaks
    uis = {ui, lui}

    if req.repo:
        uis.add(req.repo.ui)

    if (
        req.earlyoptions[b'verbose']
        or req.earlyoptions[b'debug']
        or req.earlyoptions[b'quiet']
    ):
        for opt in (b'verbose', b'debug', b'quiet'):
            val = pycompat.bytestr(bool(req.earlyoptions[opt]))
            for ui_ in uis:
                ui_.setconfig(b'ui', opt, val, b'--' + opt)

    if req.earlyoptions[b'profile']:
        for ui_ in uis:
            ui_.setconfig(b'profiling', b'enabled', b'true', b'--profile')

    profile = lui.configbool(b'profiling', b'enabled')
    with profiling.profile(lui, enabled=profile) as profiler:
        # Configure extensions in phases: uisetup, extsetup, cmdtable, and
        # reposetup
        extensions.loadall(lui)
        # Propagate any changes to lui.__class__ by extensions
        ui.__class__ = lui.__class__

        # (uisetup and extsetup are handled in extensions.loadall)

        # (reposetup is handled in hg.repository)

        addaliases(lui, commands.table)

        # All aliases and commands are completely defined, now.
        # Check abbreviation/ambiguity of shell alias.
        shellaliasfn = _checkshellalias(lui, ui, args)
        if shellaliasfn:
            # no additional configs will be set, set up the ui instances
            for ui_ in uis:
                extensions.populateui(ui_)
            return shellaliasfn()

        # check for fallback encoding
        fallback = lui.config(b'ui', b'fallbackencoding')
        if fallback:
            encoding.fallbackencoding = fallback

        fullargs = args
        cmd, func, args, options, cmdoptions = _parse(lui, args)

        # store the canonical command name in request object for later access
        req.canonical_command = cmd

        # early options were parsed before aliases/extensions could change
        # the option table, so re-parsed values must match them exactly
        if options[b"config"] != req.earlyoptions[b"config"]:
            raise error.Abort(_(b"option --config may not be abbreviated!"))
        if options[b"cwd"] != req.earlyoptions[b"cwd"]:
            raise error.Abort(_(b"option --cwd may not be abbreviated!"))
        if options[b"repository"] != req.earlyoptions[b"repository"]:
            raise error.Abort(
                _(
                    b"option -R has to be separated from other options (e.g. not "
                    b"-qR) and --repository may only be abbreviated as --repo!"
                )
            )
        if options[b"debugger"] != req.earlyoptions[b"debugger"]:
            raise error.Abort(_(b"option --debugger may not be abbreviated!"))
        # don't validate --profile/--traceback, which can be enabled from now

        if options[b"encoding"]:
            encoding.encoding = options[b"encoding"]
        if options[b"encodingmode"]:
            encoding.encodingmode = options[b"encodingmode"]
        if options[b"time"]:

            def get_times():
                t = os.times()
                if t[4] == 0.0:
                    # Windows leaves this as zero, so use time.perf_counter()
                    t = (t[0], t[1], t[2], t[3], util.timer())
                return t

            s = get_times()

            def print_time():
                t = get_times()
                ui.warn(
                    _(b"time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n")
                    % (
                        t[4] - s[4],
                        t[0] - s[0],
                        t[2] - s[2],
                        t[1] - s[1],
                        t[3] - s[3],
                    )
                )

            # report elapsed times when the process exits
            ui.atexit(print_time)
        if options[b"profile"]:
            profiler.start()

        # if abbreviated version of this were used, take them in account, now
        if options[b'verbose'] or options[b'debug'] or options[b'quiet']:
            for opt in (b'verbose', b'debug', b'quiet'):
                if options[opt] == req.earlyoptions[opt]:
                    continue
                val = pycompat.bytestr(bool(options[opt]))
                for ui_ in uis:
                    ui_.setconfig(b'ui', opt, val, b'--' + opt)

        if options[b'traceback']:
            for ui_ in uis:
                ui_.setconfig(b'ui', b'traceback', b'on', b'--traceback')

        if options[b'noninteractive']:
            for ui_ in uis:
                ui_.setconfig(b'ui', b'interactive', b'off', b'-y')

        if cmdoptions.get(b'insecure', False):
            for ui_ in uis:
                ui_.insecureconnections = True

        # setup color handling before pager, because setting up pager
        # might cause incorrect console information
        coloropt = options[b'color']
        for ui_ in uis:
            if coloropt:
                ui_.setconfig(b'ui', b'color', coloropt, b'--color')
            color.setup(ui_)

        if stringutil.parsebool(options[b'pager']):
            # ui.pager() expects 'internal-always-' prefix in this case
            ui.pager(b'internal-always-' + cmd)
        elif options[b'pager'] != b'auto':
            for ui_ in uis:
                ui_.disablepager()

        # configs are fully loaded, set up the ui instances
        for ui_ in uis:
            extensions.populateui(ui_)

        if options[b'version']:
            return commands.version_(ui)
        if options[b'help']:
            return commands.help_(ui, cmd, command=cmd is not None)
        elif not cmd:
            return commands.help_(ui, b'shortlist')

        repo = None
        cmdpats = args[:]
        assert func is not None  # help out pytype
        if not func.norepo:
            # use the repo from the request only if we don't have -R
            if not rpath and not cwd:
                repo = req.repo

            if repo:
                # set the descriptors of the repo ui to those of ui
                repo.ui.fin = ui.fin
                repo.ui.fout = ui.fout
                repo.ui.ferr = ui.ferr
                repo.ui.fmsg = ui.fmsg
            else:
                try:
                    repo = hg.repository(
                        ui,
                        path=path,
                        presetupfuncs=req.prereposetups,
                        intents=func.intents,
                    )
                    if not repo.local():
                        raise error.Abort(
                            _(b"repository '%s' is not local") % path
                        )
                    repo.ui.setconfig(
                        b"bundle", b"mainreporoot", repo.root, b'repo'
                    )
                except error.RequirementError:
                    raise
                except error.RepoError:
                    if rpath:  # invalid -R path
                        raise
                    if not func.optionalrepo:
                        if func.inferrepo and args and not path:
                            # try to infer -R from command args
                            repos = pycompat.maplist(cmdutil.findrepo, args)
                            guess = repos[0]
                            # only restart dispatch when every argument
                            # points at the same repository
                            if guess and repos.count(guess) == len(repos):
                                req.args = [b'--repository', guess] + fullargs
                                req.earlyoptions[b'repository'] = guess
                                return _dispatch(req)
                        if not path:
                            raise error.RepoError(
                                _(
                                    b"no repository found in"
                                    b" '%s' (.hg not found)"
                                )
                                % encoding.getcwd()
                            )
                        raise
            if repo:
                ui = repo.ui
                if options[b'hidden']:
                    repo = repo.unfiltered()
                args.insert(0, repo)
        elif rpath:
            ui.warn(_(b"warning: --repository ignored\n"))

        msg = _formatargs(fullargs)
        ui.log(b"command", b'%s\n', msg)
        strcmdopt = pycompat.strkwargs(cmdoptions)
        d = lambda: util.checksignature(func)(ui, *args, **strcmdopt)
        try:
            return runcommand(
                lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions
            )
        finally:
            # only close repositories we opened ourselves
            if repo and repo != req.repo:
                repo.close()
1238
1238
1239
1239
def _runcommand(ui, options, cmd, cmdfunc):
    """Run a command function, possibly with profiling enabled."""
    try:
        # give tracing tools a span covering the actual command body
        with tracing.log("Running %s command" % cmd):
            result = cmdfunc()
    except error.SignatureError:
        # the command was called with arguments its signature rejects
        raise error.CommandError(cmd, _(b'invalid arguments'))
    return result
1247
1247
1248
1248
def _exceptionwarning(ui):
    """Produce a warning message for the current active exception"""

    # For compatibility checking, we discard the portion of the hg
    # version after the + on the assumption that if a "normal
    # user" is running a build with a + in it the packager
    # probably built from fairly close to a tag and anyone with a
    # 'make local' copy of hg (where the version number can be out
    # of date) will be clueful enough to notice the implausible
    # version number and try updating.
    ct = util.versiontuple(n=2)
    # worst suspect so far: (extension name, nearest tested version, contact)
    worst = None, ct, b''
    if ui.config(b'ui', b'supportcontact') is None:
        for name, mod in extensions.extensions():
            # 'testedwith' should be bytes, but not all extensions are ported
            # to py3 and we don't want UnicodeException because of that.
            testedwith = stringutil.forcebytestr(
                getattr(mod, 'testedwith', b'')
            )
            report = getattr(mod, 'buglink', _(b'the extension author.'))
            if not testedwith.strip():
                # We found an untested extension. It's likely the culprit.
                worst = name, b'unknown', report
                break

            # Never blame on extensions bundled with Mercurial.
            if extensions.ismoduleinternal(mod):
                continue

            tested = [util.versiontuple(t, 2) for t in testedwith.split()]
            if ct in tested:
                continue

            # pick the closest tested version below the running one;
            # an extension far behind is the more likely culprit
            lower = [t for t in tested if t < ct]
            nearest = max(lower or tested)
            if worst[0] is None or nearest < worst[1]:
                worst = name, nearest, report
    if worst[0] is not None:
        name, testedwith, report = worst
        if not isinstance(testedwith, (bytes, str)):
            testedwith = b'.'.join(
                [stringutil.forcebytestr(c) for c in testedwith]
            )
        warning = _(
            b'** Unknown exception encountered with '
            b'possibly-broken third-party extension %s\n'
            b'** which supports versions %s of Mercurial.\n'
            b'** Please disable %s and try your action again.\n'
            b'** If that fixes the bug please report it to %s\n'
        ) % (name, testedwith, name, stringutil.forcebytestr(report))
    else:
        # no suspicious extension: point the user at the bug tracker
        bugtracker = ui.config(b'ui', b'supportcontact')
        if bugtracker is None:
            bugtracker = _(b"https://mercurial-scm.org/wiki/BugTracker")
        warning = (
            _(
                b"** unknown exception encountered, "
                b"please report by visiting\n** "
            )
            + bugtracker
            + b'\n'
        )
    sysversion = pycompat.sysbytes(sys.version).replace(b'\n', b'')
    warning += (
        (_(b"** Python %s\n") % sysversion)
        + (_(b"** Mercurial Distributed SCM (version %s)\n") % util.version())
        + (
            _(b"** Extensions loaded: %s\n")
            % b", ".join([x[0] for x in extensions.extensions()])
        )
    )
    return warning
1321
1321
1322
1322
def handlecommandexception(ui):
    """Produce a warning message for broken commands

    Called when handling an exception; the exception is reraised if
    this function returns False, ignored otherwise.
    """
    warning = _exceptionwarning(ui)
    tb = pycompat.sysbytes(traceback.format_exc())
    # keep the full traceback in the log even though only the warning
    # is shown to the user
    ui.log(b"commandexception", b"%s\n%s\n", warning, tb)
    ui.warn(warning)
    # returning False tells the caller to re-raise the exception
    return False
@@ -1,472 +1,480
1 # error.py - Mercurial exceptions
1 # error.py - Mercurial exceptions
2 #
2 #
3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Mercurial exceptions.
8 """Mercurial exceptions.
9
9
10 This allows us to catch exceptions at higher levels without forcing
10 This allows us to catch exceptions at higher levels without forcing
11 imports.
11 imports.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 # Do not import anything but pycompat here, please
16 # Do not import anything but pycompat here, please
17 from . import pycompat
17 from . import pycompat
18
18
19
19
def _tobytes(exc):
    """Byte-stringify exception in the same way as BaseException_str()"""
    args = exc.args
    if not args:
        return b''
    if len(args) == 1:
        # single argument: render it directly, no parentheses
        return pycompat.bytestr(args[0])
    quoted = (b"'%s'" % pycompat.bytestr(a) for a in args)
    return b'(%s)' % b', '.join(quoted)
27
27
28
28
class Hint(object):
    """Mix-in to provide a hint of an error

    This should come first in the inheritance list to consume a hint and
    pass remaining arguments to the exception class.
    """

    def __init__(self, *args, **kwargs):
        # Pull the 'hint' keyword out so cooperating base-class
        # constructors never see it; default to None when absent.
        self.hint = kwargs.pop('hint', None)
        super(Hint, self).__init__(*args, **kwargs)
39
39
40
40
class StorageError(Hint, Exception):
    """Raised when an error occurs in a storage layer.

    Usually subclassed by a storage-specific exception.
    """

    # render the message via _tobytes, matching BaseException.__str__
    __bytes__ = _tobytes
48
48
49
49
class RevlogError(StorageError):
    """StorageError raised by revlog code."""

    __bytes__ = _tobytes
52
52
53
53
class SidedataHashError(RevlogError):
    """Raised when a sidedata entry's digest does not match the expected one."""

    def __init__(self, key, expected, got):
        # key identifying the sidedata entry that failed verification
        self.sidedatakey = key
        # digest that was recorded/expected for the entry
        self.expecteddigest = expected
        # digest actually computed from the data
        self.actualdigest = got
59
59
60
60
class FilteredIndexError(IndexError):
    """IndexError variant raised for a filtered (hidden) revision index."""

    __bytes__ = _tobytes
63
63
64
64
class LookupError(RevlogError, KeyError):
    """Raised when a name/node cannot be found in a revlog index.

    ``name`` may be a binary node id (20 bytes), in which case the
    message shows its short hex form.
    """

    def __init__(self, name, index, message):
        self.name = name
        self.index = index
        # this can't be called 'message' because at least some installs of
        # Python 2.6+ complain about the 'message' property being deprecated
        self.lookupmessage = message
        # a 20-byte bytes name is a binary node id; display its short form
        if isinstance(name, bytes) and len(name) == 20:
            from .node import short

            name = short(name)
        # if name is a binary node, it can be None
        RevlogError.__init__(
            self, b'%s@%s: %s' % (index, pycompat.bytestr(name), message)
        )

    def __bytes__(self):
        # bypass KeyError's quoting behavior; render like RevlogError
        return RevlogError.__bytes__(self)

    def __str__(self):
        return RevlogError.__str__(self)
86
86
87
87
class AmbiguousPrefixLookupError(LookupError):
    """LookupError raised when a node prefix matches more than one node."""

    pass
90
90
91
91
class FilteredLookupError(LookupError):
    """LookupError variant for a revision hidden by the current filter."""

    pass
94
94
95
95
class ManifestLookupError(LookupError):
    """LookupError raised during manifest lookups."""

    pass
98
98
99
99
class CommandError(Exception):
    """Exception raised on errors in parsing the command line."""

    def __init__(self, command, message):
        self.command = command
        self.message = message
        super(CommandError, self).__init__()

    # args stays empty (super().__init__() gets none), so _tobytes
    # renders this as an empty byte string
    __bytes__ = _tobytes
109
109
110
110
class UnknownCommand(Exception):
    """Exception raised if command is not in the command table."""

    def __init__(self, command, all_commands=None):
        self.command = command
        # optional collection of all known command names — presumably used
        # by callers for "did you mean" suggestions; verify at call sites
        self.all_commands = all_commands
        super(UnknownCommand, self).__init__()

    __bytes__ = _tobytes
120
120
121
121
class AmbiguousCommand(Exception):
    """Exception raised if command shortcut matches more than one command."""

    def __init__(self, prefix, matches):
        # the abbreviated command name the user typed
        self.prefix = prefix
        # the command names that the prefix matched
        self.matches = matches
        super(AmbiguousCommand, self).__init__()

    __bytes__ = _tobytes
131
131
132
132
class InterventionRequired(Hint, Exception):
    """Exception raised when a command requires human intervention."""

    __bytes__ = _tobytes
137
137
138
138
class ConflictResolutionRequired(InterventionRequired):
    """Raised when a continuable command stops on unresolved merge conflicts.

    ``opname`` is the continuable operation (e.g. ``rebase``) used to
    build the "hg <op> --continue" message.
    """

    def __init__(self, opname):
        from .i18n import _

        self.opname = opname
        message = (
            _(
                b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
            )
            % opname
        )
        InterventionRequired.__init__(self, message)
153
153
154
154
class Abort(Hint, Exception):
    """Raised if a command needs to print an error and exit.

    The error text is kept on ``self.message`` (bytes) and the optional
    hint on ``self.hint`` (via the Hint mixin contract).
    """

    def __init__(self, message, hint=None):
        self.message = message
        self.hint = hint
        # Also hand the message to Exception so extensions that inspect
        # exc.args[0] keep working.
        Exception.__init__(self, message)

    def __bytes__(self):
        return self.message

    if pycompat.ispy3:

        def __str__(self):
            # The output would be unreadable if the message was translated,
            # but do not replace this with encoding.strfromlocal(), which
            # may raise yet another exception.
            return pycompat.sysstr(self.__bytes__())
167
175
168
176
class HookLoadError(Abort):
    """Raised when loading a hook fails, aborting an operation.

    Exists so callers can catch hook-loading failures specifically.
    """
173
181
174
182
class HookAbort(Abort):
    """Raised when a validation hook fails, aborting an operation.

    Exists so callers can catch hook failures specifically.
    """
179
187
180
188
class ConfigError(Abort):
    """Raised while parsing configuration files."""
183
191
184
192
class UpdateAbort(Abort):
    """Raised when an update is aborted over a destination issue."""
187
195
188
196
class MergeDestAbort(Abort):
    """Raised when a merge is aborted over destination issues."""
191
199
192
200
class NoMergeDestAbort(MergeDestAbort):
    """Raised when an update is aborted because there is nothing to merge."""
195
203
196
204
class ManyMergeDestAbort(MergeDestAbort):
    """Raised when an update is aborted because the destination is ambiguous."""
199
207
200
208
class ResponseExpected(Abort):
    """Raised when a prompt receives EOF instead of an answer."""

    def __init__(self):
        # Import locally to avoid a cycle at module import time.
        from .i18n import _

        Abort.__init__(self, _(b'response expected'))
208
216
209
217
class OutOfBandError(Hint, Exception):
    """Raised when a remote repository reports a failure."""

    __bytes__ = _tobytes
214
222
215
223
class ParseError(Hint, Exception):
    """Raised while parsing config files and {rev,file}sets (msg[, pos])."""

    __bytes__ = _tobytes
220
228
221
229
class PatchError(Exception):
    """Raised when applying or parsing a patch fails."""

    __bytes__ = _tobytes
224
232
225
233
class UnknownIdentifier(ParseError):
    """Raised when a {rev,file}set references an unknown identifier.

    ``symbols`` carries the known identifiers so callers can suggest
    close matches.
    """

    def __init__(self, function, symbols):
        from .i18n import _

        self.function = function
        self.symbols = symbols
        ParseError.__init__(self, _(b"unknown identifier: %s") % function)
235
243
236
244
class RepoError(Hint, Exception):
    """Base class for repository-level errors."""

    __bytes__ = _tobytes
239
247
240
248
class RepoLookupError(RepoError):
    """Raised when a revision/node lookup in a repository fails."""
243
251
244
252
class FilteredRepoLookupError(RepoLookupError):
    """Raised when a lookup fails because the revision is filtered out."""
247
255
248
256
class CapabilityError(RepoError):
    """Raised when a peer lacks a required capability."""
251
259
252
260
class RequirementError(RepoError):
    """Raised if .hg/requires has an unknown entry."""
255
263
256
264
class StdioError(IOError):
    """Raised if I/O to stdout or stderr fails.

    Wraps the underlying OS error, copying its errno and strerror.
    No __bytes__ is defined: the message comes from the standard IOError.
    """

    def __init__(self, err):
        IOError.__init__(self, err.errno, err.strerror)
264
272
265
273
class UnsupportedMergeRecords(Abort):
    """Raised when the merge state contains record types we cannot handle.

    ``recordtypes`` is stored sorted for deterministic messages.
    """

    def __init__(self, recordtypes):
        from .i18n import _

        self.recordtypes = sorted(recordtypes)
        joined = b' '.join(self.recordtypes)
        Abort.__init__(
            self,
            _(b'unsupported merge state records: %s') % joined,
            hint=_(
                b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
                b'more information'
            ),
        )
280
288
281
289
class UnknownVersion(Abort):
    """Generic abort for encountering an unknown version of some format.

    The offending version is kept on ``self.version`` for callers to
    inspect.
    """

    def __init__(self, msg, hint=None, version=None):
        self.version = version
        super(UnknownVersion, self).__init__(msg, hint=hint)
289
297
290
298
class LockError(IOError):
    """Base class for errors while acquiring a repository lock.

    No __bytes__ is defined: the message comes from the standard IOError.
    """

    def __init__(self, errno, strerror, filename, desc):
        IOError.__init__(self, errno, strerror, filename)
        self.desc = desc
297
305
298
306
class LockHeld(LockError):
    """Raised when the lock is already held by another locker."""

    def __init__(self, errno, filename, desc, locker):
        self.locker = locker
        LockError.__init__(self, errno, b'Lock held', filename, desc)
303
311
304
312
class LockUnavailable(LockError):
    """Raised when the lock file cannot be created at all."""
307
315
308
316
# LockError is for errors while acquiring the lock -- this is unrelated
class LockInheritanceContractViolation(RuntimeError):
    """Raised when lock inheritance is used outside its contract."""

    __bytes__ = _tobytes
312
320
313
321
class ResponseError(Exception):
    """Raised to print an error alongside partial output, then exit."""

    __bytes__ = _tobytes
318
326
319
327
# derived from KeyboardInterrupt to simplify some breakout code
class SignalInterrupt(KeyboardInterrupt):
    """Raised on SIGTERM and SIGHUP."""
323
331
324
332
class SignatureError(Exception):
    """Raised when a signature check fails."""

    __bytes__ = _tobytes
327
335
328
336
class PushRaced(RuntimeError):
    """Raised during unbundling to indicate a push race."""

    __bytes__ = _tobytes
333
341
334
342
class ProgrammingError(Hint, RuntimeError):
    """Raised if a mercurial (core or extension) developer made a mistake."""

    def __init__(self, msg, *args, **kwargs):
        # On Python 3, turn the message back into a str: this error is
        # internal-only and shows up only in stack traces.
        super(ProgrammingError, self).__init__(
            pycompat.sysstr(msg), *args, **kwargs
        )

    __bytes__ = _tobytes
346
354
347
355
class WdirUnsupported(Exception):
    """Raised when 'wdir()' is not supported."""

    __bytes__ = _tobytes
352
360
353
361
# bundle2 related errors
class BundleValueError(ValueError):
    """Raised when a bundle2 stream cannot be processed."""

    __bytes__ = _tobytes
359
367
360
368
class BundleUnknownFeatureError(BundleValueError):
    """Raised when a bundle2 part uses an unsupported feature.

    ``params`` and ``values`` (when both supplied) must be parallel
    sequences; a ``None`` value means the parameter was present without
    a value.
    """

    def __init__(self, parttype=None, params=(), values=()):
        self.parttype = parttype
        self.params = params
        self.values = values
        if self.parttype is None:
            msg = b'Stream Parameter'
        else:
            msg = parttype
        entries = self.params
        if self.params and self.values:
            assert len(self.params) == len(self.values)
            entries = []
            for idx, par in enumerate(self.params):
                val = self.values[idx]
                if val is None:
                    # Fix: show the bare parameter name. Appending the
                    # None value here would make the b', '.join() below
                    # raise TypeError.
                    entries.append(par)
                else:
                    entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
        if entries:
            msg = b'%s - %s' % (msg, b', '.join(entries))
        ValueError.__init__(self, msg)
383
391
384
392
class ReadOnlyPartError(RuntimeError):
    """Raised when code tries to alter a part being generated."""

    __bytes__ = _tobytes
389
397
390
398
class PushkeyFailed(Abort):
    """Raised when a pushkey part fails to update a value.

    All arguments are stored so the caller can reconstruct the failed
    transaction; ``partid`` identifies the originating bundle2 part.
    """

    def __init__(
        self, partid, namespace=None, key=None, new=None, old=None, ret=None
    ):
        self.partid = partid
        self.namespace = namespace
        self.key = key
        self.new = new
        self.old = old
        self.ret = ret
        # No i18n here: callers are expected to turn this into a better
        # message themselves.
        Abort.__init__(
            self, b'failed to update value for "%s/%s"' % (namespace, key)
        )
407
415
408
416
class CensoredNodeError(StorageError):
    """Raised when content verification fails on a censored node.

    Also carries the tombstone data substituted for the uncensored data.
    """

    def __init__(self, filename, node, tombstone):
        from .node import short

        self.tombstone = tombstone
        StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
420
428
421
429
class CensoredBaseError(StorageError):
    """Raised when a delta is rejected because its base is censored.

    A delta based on a censored revision must be formed as single patch
    operation which replaces the entire base with new content. This ensures
    the delta may be applied by clones which have not censored the base.
    """
429
437
430
438
class InvalidBundleSpecification(Exception):
    """Raised when a bundle specification is syntactically invalid.

    Covers syntax errors only, as opposed to support errors.
    """

    __bytes__ = _tobytes
438
446
439
447
class UnsupportedBundleSpecification(Exception):
    """Raised when a bundle specification is valid but not supported."""

    __bytes__ = _tobytes
444
452
445
453
class CorruptedState(Exception):
    """Raised when a command cannot read its state from file."""

    __bytes__ = _tobytes
450
458
451
459
class PeerTransportError(Abort):
    """Transport-level I/O error when communicating with a peer repo."""
454
462
455
463
class InMemoryMergeConflictsError(Exception):
    """Raised when merge conflicts arise during an in-memory merge."""

    __bytes__ = _tobytes
460
468
461
469
class WireprotoCommandError(Exception):
    """An error during execution of a wire protocol command.

    Should only be thrown by wire protocol version 2 commands.

    ``message`` is a formatter string; ``messageargs`` is an optional
    iterable of arguments for it.
    """

    def __init__(self, message, args=None):
        self.messageargs = args
        self.message = message
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now