##// END OF EJS Templates
manifestcache: protect write with `wlock` instead of `lock`...
marmoute -
r42130:d1218230 default
parent child Browse files
Show More
@@ -1,3429 +1,3429 b''
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import difflib
12 import difflib
13 import errno
13 import errno
14 import operator
14 import operator
15 import os
15 import os
16 import random
16 import random
17 import re
17 import re
18 import socket
18 import socket
19 import ssl
19 import ssl
20 import stat
20 import stat
21 import string
21 import string
22 import subprocess
22 import subprocess
23 import sys
23 import sys
24 import time
24 import time
25
25
26 from .i18n import _
26 from .i18n import _
27 from .node import (
27 from .node import (
28 bin,
28 bin,
29 hex,
29 hex,
30 nullhex,
30 nullhex,
31 nullid,
31 nullid,
32 nullrev,
32 nullrev,
33 short,
33 short,
34 )
34 )
35 from . import (
35 from . import (
36 bundle2,
36 bundle2,
37 changegroup,
37 changegroup,
38 cmdutil,
38 cmdutil,
39 color,
39 color,
40 context,
40 context,
41 copies,
41 copies,
42 dagparser,
42 dagparser,
43 encoding,
43 encoding,
44 error,
44 error,
45 exchange,
45 exchange,
46 extensions,
46 extensions,
47 filemerge,
47 filemerge,
48 filesetlang,
48 filesetlang,
49 formatter,
49 formatter,
50 hg,
50 hg,
51 httppeer,
51 httppeer,
52 localrepo,
52 localrepo,
53 lock as lockmod,
53 lock as lockmod,
54 logcmdutil,
54 logcmdutil,
55 merge as mergemod,
55 merge as mergemod,
56 obsolete,
56 obsolete,
57 obsutil,
57 obsutil,
58 phases,
58 phases,
59 policy,
59 policy,
60 pvec,
60 pvec,
61 pycompat,
61 pycompat,
62 registrar,
62 registrar,
63 repair,
63 repair,
64 revlog,
64 revlog,
65 revset,
65 revset,
66 revsetlang,
66 revsetlang,
67 scmutil,
67 scmutil,
68 setdiscovery,
68 setdiscovery,
69 simplemerge,
69 simplemerge,
70 sshpeer,
70 sshpeer,
71 sslutil,
71 sslutil,
72 streamclone,
72 streamclone,
73 templater,
73 templater,
74 treediscovery,
74 treediscovery,
75 upgrade,
75 upgrade,
76 url as urlmod,
76 url as urlmod,
77 util,
77 util,
78 vfs as vfsmod,
78 vfs as vfsmod,
79 wireprotoframing,
79 wireprotoframing,
80 wireprotoserver,
80 wireprotoserver,
81 wireprotov2peer,
81 wireprotov2peer,
82 )
82 )
83 from .utils import (
83 from .utils import (
84 cborutil,
84 cborutil,
85 dateutil,
85 dateutil,
86 procutil,
86 procutil,
87 stringutil,
87 stringutil,
88 )
88 )
89
89
90 from .revlogutils import (
90 from .revlogutils import (
91 deltas as deltautil
91 deltas as deltautil
92 )
92 )
93
93
# Shorthand for releasing a sequence of locks (see lockmod.release).
release = lockmod.release

# Command table decorator for this module; every @command below registers
# its function as a debug* command.
command = registrar.command()
97
97
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs == 3:
        # an explicit revlog index file was given: open it directly
        index, rev1, rev2 = args
        opener = vfsmod.vfs(encoding.getcwd(), audit=False)
        r = revlog.revlog(opener, index)
        lookup = r.lookup
    elif nargs == 2:
        # no index file: fall back to the changelog of the current repo
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    ancnode = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (r.rev(ancnode), hex(ancnode)))
116
116
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # open the bundle file and replay its contents into the repository
    fh = hg.openpath(ui, fname)
    bundle = exchange.readbundle(ui, fh, fname)
    bundle.apply(repo)
123
123
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    # refuse to run in a non-empty repo: revision numbers produced by the
    # DAG text are assumed to start at 0
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG (first parse pass, used only so the
    # progress bar below has a total)
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i
                              for i in pycompat.xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []
    progress = ui.makeprogress(_('building'), unit=_('revisions'),
                               total=total)
    # all repository mutations happen under wlock+lock inside one transaction
    with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"):
        at = -1                 # rev number of the most recently created node
        atbranch = 'default'    # branch applied to subsequently created nodes
        nodeids = []            # nodeids[rev] -> node hash, for backrefs
        id = 0
        progress.update(id)
        # second parse pass: actually create the commits/tags/branches
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        # merge node: three-way merge the file contents of
                        # both parents against their common ancestor
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # touch this rev's dedicated line so every rev changes
                    # the file without conflicting with its neighbors
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        # carry the second parent's nf* files into the merge
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # local tag for the preceding node; written out at the end
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                ui.note(('branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write("localtags", "".join(tags))
271
271
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """Write a description of changegroup 'gen' to the ui.

    With 'all' set, every delta of every chunk group (changelog, manifest,
    filelogs) is listed with its full metadata; otherwise only changelog
    node hashes are printed.  'indent' prefixes each output line (used when
    nested inside bundle2 part output).  Note: this consumes 'gen'.
    """
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            # dump one delta group under a heading; the corresponding
            # *header() call must already have been made on 'gen'
            ui.write("\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags = deltadata
                ui.write("%s%s %s %s %s %s %d\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        # filelog sections repeat until an empty header is returned
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata
            ui.write("%s%s\n" % (indent_string, hex(node)))
300
300
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    prefix = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # the payload advertises a marker format we cannot decode
        line = "%sunsupported version: %s (%d bytes)\n"
        ui.write(line % (prefix, exc.version, len(data)))
    else:
        line = "%sversion: %d (%d bytes)\n"
        ui.write(line % (prefix, version, len(data)))
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(prefix)
            cmdutil.showmarker(fm, m)
        fm.end()
323
323
def _debugphaseheads(ui, data, indent=0):
    """display the phase heads decoded from binary 'data'"""
    prefix = ' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(prefix)
            ui.write('%s %s\n' % (hex(head), phases.phasenames[phase]))
332
332
def _quasirepr(thing):
    """Return a repr-like byte string for 'thing'.

    Mapping types are rendered with sorted keys so the output is stable
    regardless of dict iteration order.
    """
    mappingtypes = (dict, util.sortdict, collections.OrderedDict)
    if not isinstance(thing, mappingtypes):
        return pycompat.bytestr(repr(thing))
    entries = (b'%s: %s' % (k, thing[k]) for k in sorted(thing))
    return '{%s}' % b', '.join(entries)
338
338
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    wanted = opts.get(r'part_type', [])
    for part in gen.iterparts():
        # with --part-type, only show the requested part types
        if wanted and part.type not in wanted:
            continue
        msg = '%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        # a part has exactly one type, so these branches are exclusive
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        elif part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        elif part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
361
361
@command('debugbundle',
    [('a', 'all', None, _('show all details')),
    ('', 'part-type', [], _('show only the named part type')),
    ('', 'spec', None, _('print the bundlespec of the bundle'))],
    _('FILE'),
    norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as fh:
        if spec:
            # --spec: only report the bundle's spec string, nothing else
            ui.write('%s\n' % exchange.getbundlespec(ui, fh))
            return

        gen = exchange.readbundle(ui, fh, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
380
380
@command('debugcapabilities',
    [], _('PATH'),
    norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    ui.write(('Main capabilities:\n'))
    for cap in sorted(peer.capabilities()):
        ui.write((' %s\n') % cap)
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.write(('Bundle2 capabilities:\n'))
        for key, values in sorted(b2caps.iteritems()):
            ui.write((' %s\n') % key)
            for value in values:
                ui.write((' %s\n') % value)
399
399
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate

    Cross-checks every dirstate entry against the manifests of the two
    dirstate parents, warns about each inconsistency found, and aborts
    if any were detected.
    """
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # Use 'errstr', not 'error': the original code rebound the local
        # name 'error' to the message string, shadowing the imported
        # 'error' module and making the following Abort call blow up with
        # an AttributeError instead of aborting cleanly.
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
427
427
@command('debugcolor',
    [('', 'style', None, _('show all configured styles'))],
    'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode))
    # --style lists configured styles; the default lists raw colors
    if opts.get(r'style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
438
438
def _debugdisplaycolor(ui):
    """list every available color name, each labeled with itself so it is
    rendered in that color"""
    # work on a copy so we can repurpose the style table for display
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for key, value in ui.configitems('color'):
            if key.startswith('color.'):
                ui._styles[key] = key[6:]
            elif key.startswith('terminfo.'):
                ui._styles[key] = key[9:]
    ui.write(_('available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    def sortkey(item):
        return ('_' in item[0], item[0], item[1])
    for colorname, label in sorted(ui._styles.items(), key=sortkey):
        ui.write(('%s\n') % colorname, label=label)
456
456
def _debugdisplaystyle(ui):
    """list the configured styles and the effects each one maps to"""
    ui.write(_('available style:\n'))
    if not ui._styles:
        return
    # width of the longest label, used to align the effect columns
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(label))))
            rendered = (ui.label(e, e) for e in effects.split())
            ui.write(', '.join(rendered))
        ui.write('\n')
470
470
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, chunks = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, chunks, fname)

    reqstr = ', '.join(sorted(requirements))
    ui.write(_('bundle requirements: %s\n') % reqstr)
488
488
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        # explicit revlog index: emit its DAG, labeling the listed revs
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            # yield ('n', (rev, parents)) for each revision, and an
            # ('l', (rev, label)) event for revs the user asked to label
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # no index file: walk the repository changelog instead
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            # map rev -> list of tag names pointing at it
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # emit an ('a', branchname) event whenever the branch
                    # changes from the previous revision
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")
551
551
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    # With -c/-m/--dir the storage is selected by the flag, so the
    # positional FILE argument actually carries the revision.
    if any(opts.get(o) for o in ('changelog', 'manifest', 'dir')):
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    store = cmdutil.openstorage(repo, 'debugdata', file_, opts)
    try:
        ui.write(store.revision(store.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
567
567
@command('debugdate',
         [('e', 'extended', None, _('try extended date formats'))],
         _('[-e] DATE [RANGE]'),
         norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # --extended enables the larger, more permissive set of date formats.
    if opts[r"extended"]:
        parsed = dateutil.parsedate(date, util.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % parsed)
    ui.write(("standard: %s\n") % dateutil.datestr(parsed))
    if range:
        # Optionally test the parsed timestamp against a date range spec.
        matcher = dateutil.matchdate(range)
        ui.write(("match: %s\n") % matcher(parsed[0]))
583
583
@command('debugdeltachain',
         cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'),
         optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r.version & revlog.FLAG_GENERALDELTA
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        # Gather per-revision delta statistics from the revlog index entry.
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            # With generaldelta the delta base (e[3]) may be any revision;
            # classify it: against p1/p2 (e[5]/e[6] match), the previous
            # revision, itself (a full snapshot), or some other revision.
            if e[3] == e[5]:
                deltatype = 'p1'
            elif e[3] == e[6]:
                deltatype = 'p2'
            elif e[3] == rev - 1:
                deltatype = 'prev'
            elif e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'other'
        else:
            # Without generaldelta a revision is either a full snapshot
            # (base == itself) or a delta against the previous revision.
            if e[3] == rev:
                deltatype = 'base'
            else:
                deltatype = 'prev'

        chain = r._deltachain(rev)[0]
        # Sum the compressed size of every revision in the chain.
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter('debugdeltachain', opts)

    # NOTE(review): spacing inside these header/format strings aligns the
    # plain-text columns; whitespace may have been collapsed in this view —
    # confirm exact widths against repository history.
    fm.plain(' rev chain# chainlen prev delta '
             'size rawsize chainsize ratio lindist extradist '
             'extraratio')
    if withsparseread:
        fm.plain(' readsize largestblk rddensity srchunks')
    fm.plain('\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        # Number chains consecutively by their (unique) base revision.
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        # On-disk span from the chain base to the end of this revision.
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            # Chain of length one: this revision is its own base.
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write('rev chainid chainlen prevrev deltatype compsize '
                 'uncompsize chainsize chainratio lindist extradist '
                 'extraratio',
                 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
                 rev, chainid, len(chain), prevrev, deltatype, comp,
                 uncomp, chainsize, chainratio, lineardist, extradist,
                 extraratio,
                 rev=rev, chainid=chainid, chainlen=len(chain),
                 prevrev=prevrev, deltatype=deltatype, compsize=comp,
                 uncompsize=uncomp, chainsize=chainsize,
                 chainratio=chainratio, lindist=lineardist,
                 extradist=extradist, extraratio=extraratio)
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            # Simulate the sparse read: slice the chain into contiguous
            # on-disk chunks and accumulate read statistics per chunk.
            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write('readsize largestblock readdensity srchunks',
                     ' %10d %10d %9.5f %8d',
                     readsize, largestblock, readdensity, srchunks,
                     readsize=readsize, largestblock=largestblock,
                     readdensity=readdensity, srchunks=srchunks)

        fm.plain('\n')

    fm.end()
735
735
@command('debugdirstate|debugstate',
         [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
          ('', 'dates', True, _('display the saved mtime')),
          ('', 'datesort', None, _('sort by saved mtime'))],
         _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    # --nodates is the deprecated way to suppress timestamps; --no-dates
    # (negating the 'dates' flag) is the supported spelling.
    nodates = not opts[r'dates']
    if opts.get(r'nodates') is not None:
        nodates = True
    datesort = opts.get(r'datesort')

    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # Dirstate map entries look like (state, mode, size, mtime) given how
    # they are consumed below.
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            # mtime of -1 means the timestamp is unset.
            # NOTE(review): the 'unset '/'set ' literals are padded to the
            # strftime column width in the original source; whitespace may
            # have been collapsed in this view — confirm exact widths.
            timestr = 'unset '
        elif nodates:
            timestr = 'set '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            # Symlink bit set in the stored mode.
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
769
769
@command('debugdiscovery',
         [('', 'old', None, _('use old-style discovery')),
          ('', 'nonheads', None,
           _('use old-style discovery with non-heads included')),
          # Wrapped in _() for consistency with every other option help
          # string (it was previously untranslatable).
          ('', 'rev', [], _('restrict discovery to this set of revs')),
          ] + cmdutil.remoteopts,
         _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(pushedrevs, remoteheads, remote=remote):
        # Run one discovery round and report the common/missing summary.
        if opts.get('old'):
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))

                clnode = repo.changelog.node
                common = repo.revs('heads(::%ln)', common)
                common = {clnode(r) for r in common}
        else:
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            # Second return value is unused here; renamed from 'any' to
            # avoid shadowing the builtin.
            common, anyinc, hds = setdiscovery.findcommonheads(
                ui, repo, remote, ancestorsof=nodes)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    doit(localrevs, remoterevs)
822
822
# Copy buffer size for debugdownload (4 KiB).
_chunksize = 4 << 10

@command('debugdownload',
         [
             ('o', 'output', '', _('path')),
         ],
         optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config
    """
    fh = urlmod.open(ui, url, output)
    try:
        # Write to the given output file, or to the ui when no path is given.
        dest = ui
        if output:
            dest = open(output, "wb", _chunksize)
        try:
            data = fh.read(_chunksize)
            while data:
                dest.write(data)
                data = fh.read(_chunksize)
        finally:
            if output:
                dest.close()
    finally:
        # The source handle was previously leaked; always release it.
        fh.close()
846
846
@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = [] # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            # In default (non-quiet, non-verbose) mode, annotate the name
            # with the extension's tested-with status.
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                # Show the most recent Mercurial version it was tested with.
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                     _(' location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
        # Always expose 'bundled' to structured formatters (json/template).
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _(' tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _(' bug reporting: %s\n'), extbuglink or "")

    fm.end()
892
892
@command('debugfileset',
         [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
          ('', 'all-files', False,
           _('test files from all revisions and working directory')),
          ('s', 'show-matcher', None,
           _('print internal representation of matcher')),
          ('p', 'show-stage', [],
           _('print parsed tree at the given stage'), _('NAME'))],
         _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset
    fileset.symbols # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    # The compilation pipeline: each stage maps a parse tree to a parse tree.
    stages = [
        ('parsed', pycompat.identity),
        ('analyzed', filesetlang.analyze),
        ('optimized', filesetlang.optimize),
    ]
    stagenames = {stagename for stagename, transform in stages}

    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for stagename in opts['show_stage']:
            if stagename not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % stagename)
        showalways.update(opts['show_stage'])

    tree = filesetlang.parse(expr)
    for stagename, transform in stages:
        tree = transform(tree)
        if stagename in showalways:
            # The bare --verbose form prints the parsed tree without header.
            if opts['show_stage'] or stagename != 'parsed':
                ui.write(("* %s:\n") % stagename)
            ui.write(filesetlang.prettyformat(tree), "\n")

    # Collect the candidate file names the matcher will be tested against.
    files = set()
    if opts['all_files']:
        for rev in repo:
            rctx = repo[rev]
            files.update(rctx.files())
            files.update(rctx.substate)
    if opts['all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(repo.dirstate.walk(scmutil.matchall(repo),
                                        subrepos=list(wctx.substate),
                                        unknown=True, ignored=True))
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    for f in sorted(files):
        if m(f):
            ui.write("%s\n" % f)
959
959
@command('debugformat',
         [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # Column width: the widest variant name, never narrower than the header.
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        # Produce a '%s:' format padded so all value columns line up.
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            # Plain output: strings pass through, booleans become yes/no.
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        # Structured formatters (json/template) keep the raw value.
        formatvalue = pycompat.identity

    # NOTE(review): the spacing in these header strings aligns the columns
    # with the ' %3s'/' %6s'/' %7s' value formats below; whitespace may be
    # collapsed in this view — confirm widths against repository history.
    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # Pick color labels by whether repo, config and default values agree.
        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()
1021
1021
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    ui.write(('path: %s\n') % path)
    ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)'))
    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
    # Case sensitivity is probed empirically by creating a temporary file in
    # the target directory; if that fails (e.g. read-only path) we report
    # '(unknown)' rather than aborting.
    casesensitive = '(unknown)'
    try:
        with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
1038
1038
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    args = {}
    if common:
        args[r'common'] = [bin(s) for s in common]
    if head:
        args[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args[r'bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    # Map the user-facing --type name to the internal bundle type identifier;
    # an unrecognized name maps to None, which fails the membership test below.
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1073
1073
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                # A file may be ignored directly, or because one of its
                # parent directories matches an ignore rule.
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (uipathfn(f), ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % uipathfn(f))
1116
1116
@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)

    # --debug shows full 40-char hashes; otherwise the short form.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # Probe the first revision to learn the rendered node id width so the
    # column headers line up; fall back to 12 for an empty store.
    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter('debugindex', opts)
    fm.plain(b'   rev linkrev %s %s p2\n' % (
        b'nodeid'.ljust(idlen),
        b'p1'.ljust(idlen)))

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
        fm.write(b'node', '%s ', shortfn(node))
        fm.write(b'p1', '%s ', shortfn(parents[0]))
        fm.write(b'p2', '%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
1152
1152
@command('debugindexdot', cmdutil.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        # One edge per parent; the second parent edge is emitted only for
        # merge revisions (non-null p2).
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write("}\n")
1167
1167
@command('debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    # Touch the index first so lazily-built native structures are populated.
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    # stats() only exists on the C implementation of the index.
    if not util.safehasattr(index, 'stats'):
        raise error.Abort(_('debugindexstats only works with native code'))
    for k, v in sorted(index.stats().items()):
        ui.write('%s: %d\n' % (k, v))
1177
1177
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    # Count of hard failures; warnings are reported but not counted.
    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_(' TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_(' SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    if policy.policy in ('c', 'allow'):
        # Importing the C extensions verifies they were built and match this
        # interpreter; any failure here is an installation problem.
        err = None
        try:
            from .cext import (
                base85,
                bdiff,
                mpatch,
                osutil,
            )
            dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                # p is cleared to signal a template problem below.
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    # A missing 'vi' (the fallback default) gets a different message than a
    # missing explicitly-configured editor; only the latter is counted as a
    # problem.
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
1344
1344
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('known'):
        raise error.Abort("known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    # One '0'/'1' character per queried node, in input order.
    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
1358
1358
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    # Thin alias kept only so legacy completion scripts keep working.
    debugnamecomplete(ui, repo, *args)
1363
1363
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    # --force-lock/--force-wlock: unconditionally delete the lock files.
    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    locks = []
    try:
        if opts.get(r'set_wlock'):
            try:
                # wait=False: fail immediately instead of blocking.
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            # We managed to take the lock, so it was free; release it again.
            l.release()
        else:
            # Lock is held by someone else: report owner and age from the
            # lock file metadata. ENOENT means it was released meanwhile.
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = ('user %s, process %s, host %s'
                                  % (user or b'None', pid, host))
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
1460
1460
@command('debugmanifestfulltextcache', [
    ('', 'clear', False, _('clear the cache')),
    ('a', 'add', [], _('add the given manifest nodes to the cache'),
     _('NODE'))
    ], '')
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # The fulltext cache only exists on revlog-based manifest storage;
        # abort cleanly for alternative storage backends.
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _("Current revlog implementation doesn't appear to have a "
                    "manifest fulltext cache\n")
            raise error.Abort(msg)

    # Mutating operations take the wlock: the on-disk cache file is written
    # as part of working-copy-related operations, so wlock (not the store
    # lock) is the lock that protects it.
    if opts.get(r'clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint="Check your manifest node id")
                manifest.read()  # stores revision in cache too
            return

    # Read-only path: dump cache contents in LRU order, most recent first.
    cache = getcache()
    if not len(cache):
        ui.write(_('cache empty\n'))
    else:
        ui.write(
            _('cache contains %d manifest entries, in order of most to '
              'least recent:\n') % (len(cache),))
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24   # 20 bytes nodeid, 4 bytes size
            ui.write(_('id: %s, size %s\n') % (
                hex(nodeid), util.bytecount(size)))
        ondisk = cache._opener.stat('manifestfulltextcache').st_size
        ui.write(
            _('total cache data size %s, on-disk %s\n') % (
                util.bytecount(totalsize), util.bytecount(ondisk))
        )
1516
1516
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        # Render the null hash as the literal string 'null' for readability.
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        # Pretty-print the raw merge-state records for the requested format
        # version (closes over v1records/v2records computed below).
        ui.write(('* version %d records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                # merge driver record: driver name and its state, separated
                # by a NUL byte
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # per-file records are NUL-separated fields; the first seven
                # fields are common to both v1 and v2
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    # v1 did not record the "other" node
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write(('  ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write(('  other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                # file-extras record: filename, then key/value pairs
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                # labels record: local/other and optionally a base label
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write(('  local: %s\n' % labels[0]))
                ui.write(('  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write(('  base: %s\n' % labels[2]))
            else:
                # unknown record type: dump it raw, NULs replaced by tabs
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        # known record types sort first, in L, O, m, l order; everything
        # else after, ordered by record payload
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
    if ui.verbose:
        printrecords(2)
1615
1615
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    # Collect candidates from every registered namespace except 'branches':
    # the branches namespace would include closed branches, and historically
    # only open branch names were offered, so branches get dedicated
    # handling below.
    candidates = set()
    for nsname, namespace in repo.names.iteritems():
        if nsname == 'branches':
            continue
        candidates.update(namespace.listnames(repo))
    for tag, heads, tip, closed in repo.branchmap().iterbranches():
        if not closed:
            candidates.add(tag)

    # With no arguments, the empty prefix matches every candidate.
    prefixes = args or ['']
    matched = set()
    for prefix in prefixes:
        matched.update(n for n in candidates if n.startswith(prefix))
    ui.write('\n'.join(sorted(matched)))
    ui.write('\n')
1635
1635
@command('debugobsolete',
         [('', 'flags', 0, _('markers flag')),
          ('', 'record-parents', False,
           _('record parent information for the precursor')),
          ('r', 'rev', [], _('display markers relevant to REV')),
          ('', 'exclusive', False, _('restrict display to markers only '
                                     'relevant to REV')),
          ('', 'index', False, _('display index of the marker')),
          ('', 'delete', [], _('delete markers specified by indices')),
         ] + cmdutil.commitopts2 + cmdutil.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        # Parse a full hex node id into binary, aborting on anything that is
        # not exactly a full-length hash.
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    # --delete mode: remove the markers at the given indices and return.
    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # Creation mode: record a marker from precursor to the given
        # successors inside a transaction, under the store lock.
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = encoding.fromlocal(opts['user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    # Parents can only be recorded for changesets the local
                    # repository actually knows about.
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot used --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') %
                                  pycompat.bytestr(exc))
            finally:
                # tr.release() is a no-op after a successful tr.close(), and
                # rolls back otherwise; the lock is released last.
                tr.release()
        finally:
            l.release()
    else:
        # Display mode: list markers, optionally limited to those relevant
        # to the --rev revisions.
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsutil.getmarkers(repo, nodes=nodes,
                                              exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            # Indices are positions in the full marker list, so iterate over
            # all markers and filter down to the relevant subset on display.
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
1752
1752
@command('debugp1copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    # One "destination -> source" line per copy recorded against the first
    # parent of the requested (or working) revision.
    byteopts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, byteopts.get('rev'), default=None)
    copymap = ctx.p1copies()
    for dest in copymap:
        ui.write('%s -> %s\n' % (copymap[dest], dest))
1763
1763
@command('debugp2copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    # Bug fix: this function was previously also named debugp1copies, which
    # silently shadowed the real debugp1copies defined just above at module
    # level. The registered command name ('debugp2copies') is unchanged, so
    # user-visible behavior is identical; only the Python-level name differs.
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    # One "destination -> source" line per copy recorded against p2.
    for dst, src in ctx.p2copies().items():
        ui.write('%s -> %s\n' % (src, dst))
1774
1774
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # Return (files, dirs) completions for one spec. 'acceptable' is a
        # string of dirstate state characters (n/m/a/r) a file must be in
        # to be offered.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        # Specs that resolve outside the repository complete to nothing.
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        # Make the spec repo-relative, using '/' separators like the
        # dirstate does (relevant on Windows where ossep is '\\').
        spec = spec[len(rootdir):]
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts[r'full']
        files, dirs = set(), set()
        # Bind the .add methods once; this loop runs over the whole dirstate.
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # Without --full, stop at the next path separator and offer
                # the directory prefix instead of the whole path.
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # Build the set of acceptable dirstate states from the -n/-a/-r flags;
    # with no flags, all states ('nmar') are acceptable.
    acceptable = ''
    if opts[r'normal']:
        acceptable += 'nm'
    if opts[r'added']:
        acceptable += 'a'
    if opts[r'removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
1839
1839
@command('debugpathcopies',
         cmdutil.walkopts,
         'hg debugpathcopies REV1 REV2 [FILE]',
         inferrepo=True)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    # Resolve both endpoints, match files against the first one, and print
    # each detected copy as "destination -> source", sorted by destination.
    fromctx = scmutil.revsingle(repo, rev1)
    toctx = scmutil.revsingle(repo, rev2)
    matcher = scmutil.match(fromctx, pats, opts)
    copymap = copies.pathcopies(fromctx, toctx, matcher)
    for dst in sorted(copymap):
        ui.write('%s -> %s\n' % (copymap[dst], dst))
1851
1851
@command('debugpeer', [], _('PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Force peer request logging on; the log lines only become visible
    # when --debug is in effect.
    logoverride = {
        ('devel', 'debug.peer-request'): True,
    }

    with ui.configoverride(logoverride):
        peer = hg.peer(ui, {}, path)

        islocal = peer.local() is not None
        pushable = peer.canpush()

        def yesno(flag):
            return _('yes') if flag else _('no')

        ui.write(_('url: %s\n') % peer.url())
        ui.write(_('local: %s\n') % yesno(islocal))
        ui.write(_('pushable: %s\n') % yesno(pushable))
1870
1870
@command('debugpickmergetool',
         [('r', 'rev', '', _('check for files in this revision'), _('REV')),
          ('', 'changedelete', None, _('emulate merging change and delete')),
         ] + cmdutil.walkopts + cmdutil.mergetoolopts,
         _('[PATTERN]...'),
         inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    # --tool is surfaced to the picking logic via the ui.forcemerge config
    # override, mirroring how real merges consume it.
    overrides = {}
    if opts['tool']:
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        # Report the other tool-selection inputs (only with --verbose).
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                # Without --debug, swallow _picktool's chatter so only the
                # "FILE = MERGETOOL" lines reach the user.
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     fctx.isbinary(),
                                                     'l' in fctx.flags(),
                                                     changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))
1949
1949
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if keyinfo:
        # Five-argument form: conditionally update one key over the wire.
        key, old, new = keyinfo
        with target.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': namespace,
                'key': key,
                'old': old,
                'new': new,
            }).result()

        ui.status(pycompat.bytestr(r) + '\n')
        # The protocol reports truthy on success; invert to get the
        # conventional zero-on-success command exit code.
        return not r
    else:
        # Two-argument form: dump every key/value pair in the namespace.
        # Use items() instead of iteritems(): dict.iteritems() does not
        # exist on Python 3, while items() works on both 2 and 3.
        for k, v in sorted(target.listkeys(namespace).items()):
            ui.write("%s\t%s\n" % (stringutil.escapestr(k),
                                   stringutil.escapestr(v)))
1977
1977
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    """compare the parent vectors ("pvec") of two revisions

    Prints both pvecs, their depths, and the relation (=, >, <, |, or ?)
    computed between revision A and revision B.
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    else:
        # Previously no branch matched here left 'rel' unbound, so the
        # final ui.write raised UnboundLocalError. Report an unknown
        # relation instead of crashing.
        rel = "?"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
1998
1998
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        ds = repo.dirstate
        # None means "rebuild everything"; minimal mode narrows this to
        # only the files that disagree between dirstate and manifest.
        restrictto = None
        if opts.get(r'minimal'):
            inmanifest = set(ctx.manifest().keys())
            indirstate = set(ds)
            # tracked by the manifest but unknown to the dirstate
            missingfromds = inmanifest - indirstate
            # dirstate-only entries, except those explicitly marked added
            extranotadded = {f for f in indirstate - inmanifest
                             if ds[f] != 'a'}
            restrictto = missingfromds | extranotadded

        ds.rebuild(ctx.node(), ctx.manifest(), restrictto)
2036
2036
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # Delegate entirely to the repair module, which rescans the store
    # and rewrites the fncache from scratch.
    repair.rebuildfncache(ui, repo)
2041
2041
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] [FILE]...'))
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(ctx, pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # renamed() yields (source path, source filenode) or a false value
        # when the file was not copied/renamed at this revision.
        renameinfo = fctx.filelog().renamed(fctx.filenode())
        relpath = repo.pathto(path)
        if not renameinfo:
            ui.write(_("%s not renamed\n") % relpath)
            continue
        srcpath, srcnode = renameinfo
        ui.write(_("%s renamed from %s:%s\n") % (relpath, srcpath,
                                                 hex(srcnode)))
2059
2059
@command('debugrevlog', cmdutil.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    # --dump: print one raw line of index data per revision and stop.
    if opts.get("dump"):
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # -1 means "no delta parent": the revision is its own base.
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # Maintain the running head set: parents stop being heads
            # once a child revision appears.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    # Decode the revlog version word: low 16 bits are the format number,
    # the rest are feature flags.
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each list is [min, max, total].
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # Fold one sample into a [min, max, total] accumulator.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # Full snapshot (depth 0): chain starts here.
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            # Delta revision: extend the delta parent's chain.
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, '_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # First byte of the stored chunk identifies its compression.
            chunktype = bytes(segment[0:1])
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # From here on, slot [2] of each accumulator is converted from a
    # total to an average.
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # Integer format wide enough for the largest value printed.
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        # "value (percent)" format, padded to align with dfmtstr output.
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # Return (value, percentage-of-total) for the pcfmtstr formats.
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions : ') + fmt2 % numrevs)
    ui.write((' empty : ') + fmt % pcfmt(numempty, numrevs))
    ui.write((' text : ')
             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
    ui.write((' delta : ')
             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
    ui.write((' snapshot : ') + fmt % pcfmt(numfull + numsemi, numrevs))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
    ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write((' snapshot : ')
             + fmt % pcfmt(fulltotal + semitotal, totalsize))
    for depth in sorted(numsnapdepth):
        ui.write((' lvl-%-3d : ' % depth)
                 + fmt % pcfmt(snaptotal[depth], totalsize))
    ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # Render a chunk-type label; printable single bytes also get
        # their hex value shown.
        if chunktype == 'empty':
            return ' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return ' 0x%s : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.write(('avg chain length : ') + fmt % avgchainlen)
    ui.write(('max chain length : ') + fmt % maxchainlen)
    ui.write(('max chain reach : ') + fmt % maxchainspan)
    ui.write(('compression ratio : ') + fmt % compratio)

    # Per-revision size statistics are only meaningful for format > 0.
    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('inter-snapshot size (min/max/avg) : %d / %d / %d\n')
                 % tuple(semisize))
        for depth in sorted(snapsizedepth):
            if depth == 0:
                # depth 0 is the "full revision" line already printed.
                continue
            ui.write((' level-%-3d (min/max/avg) : %d / %d / %d\n')
                     % ((depth,) + tuple(snapsizedepth[depth])))
        ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
                                                            numprev))
            ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
                                                            numprev))
            ui.write((' other : ') + fmt2 % pcfmt(numoprev,
                                                  numprev))
        if gdelta:
            ui.write(('deltas against p1 : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2 : ')
                     + fmt % pcfmt(nump2, numdeltas))
        ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                           numdeltas))
2359
2359
@command('debugrevlogindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # With --debug print full node hashes, otherwise the short form.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        # Probe the first node only: all rendered ids have equal width.
        idlen = len(shortfn(r.node(i)))
        break

    # Column headers depend on both the requested format and verbosity.
    if format == 0:
        if ui.verbose:
            ui.write((" rev offset length linkrev"
                     " %s %s p2\n") % ("nodeid".ljust(idlen),
                                       "p1".ljust(idlen)))
        else:
            ui.write((" rev linkrev %s %s p2\n") % (
                "nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        if ui.verbose:
            ui.write((" rev flag offset length size link p1"
                      " p2 %s\n") % "nodeid".rjust(idlen))
        else:
            ui.write((" rev flag size link p1 p2 %s\n") %
                     "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # Fall back to null parents when parent lookup fails,
                # so a damaged entry does not abort the whole dump.
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
            else:
                ui.write("% 6d % 7d %s %s %s\n" % (
                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
                    shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    r.linkrev(i), pr[0], pr[1], shortfn(node)))
            else:
                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
                    shortfn(node)))
2424
2424
@command('debugrevspec',
    [('', 'optimize', None,
      _('print parsed tree after optimizing (DEPRECATED)')),
     ('', 'show-revs', True, _('print list of result revisions (default)')),
     ('s', 'show-set', None, _('print internal representation of result set')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME')),
     ('', 'no-optimized', False, _('evaluate tree without optimization')),
     ('', 'verify-optimized', False, _('verify optimized result')),
     ],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems('revsetalias')
    # Ordered pipeline of (stage name, transform); each transform consumes
    # the tree produced by the previous stage.
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
                                                           ui.warn)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)

    # 'showalways' stages are printed unconditionally; 'showchanged' stages
    # only when their tree differs from the previously printed one.
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
        if opts['optimize']:
            showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # Run every stage, remembering each intermediate tree so that
    # --verify-optimized can compare 'analyzed' against 'optimized' later.
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree

    if opts['verify_optimized']:
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
            ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
            ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # Emit a unified-diff-style comparison of the two evaluations.
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in (r'delete', r'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%d\n' % c, label='diff.deleted')
            if tag in (r'insert', r'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%d\n' % c, label='diff.inserted')
            if tag == r'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %d\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
        ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
    if not opts['show_revs']:
        return
    for c in revs:
        ui.write("%d\n" % c)
2527
2527
@command('debugserve', [
    ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
    ('', 'logiofd', '', _('file descriptor to log server I/O to')),
    ('', 'logiofile', '', _('file to log server I/O to')),
], '')
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts['sshstdio']:
        raise error.Abort(_('only --sshstdio is currently supported'))

    # Optional file object that receives a copy of the server I/O.
    logfh = None

    if opts['logiofd'] and opts['logiofile']:
        raise error.Abort(_('cannot use both --logiofd and --logiofile'))

    if opts['logiofd']:
        # Line buffered because output is line based.
        try:
            logfh = os.fdopen(int(opts['logiofd']), r'ab', 1)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts['logiofd']), r'wb', 1)
    elif opts['logiofile']:
        logfh = open(opts['logiofile'], 'ab', 1)

    # Serve until the client disconnects; this call does not return per
    # request.
    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()
2564
2564
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """
    # Resolve both revisions before taking the lock; an omitted second
    # parent defaults to the null revision.
    parent1 = scmutil.revsingle(repo, rev1).node()
    parent2 = scmutil.revsingle(repo, rev2, 'null').node()

    with repo.wlock():
        repo.setparents(parent1, parent2)
2582
2582
@command('debugssl', [], '[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    if not pycompat.iswindows:
        raise error.Abort(_('certificate chain building is only possible on '
                            'Windows'))

    if not source:
        if not repo:
            raise error.Abort(_("there is no Mercurial repository here, and no "
                                "server specified"))
        source = "default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    # Only network schemes make sense here; fall back to the scheme's
    # well-known port when the URL does not carry one.
    defaultport = {'https': 443, 'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_("malformed port number in URL"))
    else:
        raise error.Abort(_("only https and ssh connections are supported"))

    from . import win32

    # Verification is deliberately disabled (CERT_NONE): the goal is to fetch
    # whatever certificate the server presents so the chain can be rebuilt,
    # not to validate it.
    s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
                        cert_reqs=ssl.CERT_NONE, ca_certs=None)

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_('checking the certificate chain for %s\n') % url.host)

        # First pass with build=False just inspects the chain.
        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_('certificate chain is incomplete, updating... '))

            # Second call uses the default build behavior — presumably it asks
            # Windows to fetch the missing intermediates; confirm against
            # win32.checkcertificatechain.
            if not win32.checkcertificatechain(cert):
                ui.status(_('failed.\n'))
            else:
                ui.status(_('done.\n'))
        else:
            ui.status(_('full certificate chain is available\n'))
    finally:
        s.close()
2643
2643
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # substate maps subrepository path -> state tuple; dump each entry in
    # path order.
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
2654
2654
@command('debugsuccessorssets',
    [('', 'closest', False, _('return closest successors sets only'))],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    # formatters: changeset header uses bytes(ctx), successors use short
    # node hashes
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n'% ctx2str(ctx))
        for succsset in obsutil.successorssets(repo, ctx.node(),
                                               closest=opts[r'closest'],
                                               cache=cache):
            if succsset:
                # indent each successors set under its changeset
                ui.write('    ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(' ')
                    ui.write(node2str(node))
            ui.write('\n')
2707
2707
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts[r'rev']:
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts[r'rev'])

    # Keyword definitions supplied via -D KEY=VALUE; 'ui' is reserved.
    props = {}
    for d in opts[r'define']:
        try:
            k, v = (e.strip() for e in d.split('=', 1))
            if not k or k == 'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s') % d)

    if ui.verbose:
        # Print the parsed tree, and the alias-expanded tree when it differs.
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    if revs is None:
        # Generic template: render once using only the -D definitions.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested revision.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
2764
2764
@command('debuguigetpass', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguigetpass(ui, prompt=''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    # Fix the 'respose' typo so the output matches debuguiprompt's
    # 'response: ...' line.
    ui.write(('response: %s\n') % r)
2772
2772
@command('debuguiprompt', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguiprompt(ui, prompt=''):
    """show plain prompt"""
    # Echo the user's answer back on stdout.
    answer = ui.prompt(prompt)
    ui.write(('response: %s\n') % answer)
2780
2780
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Hold both the working-copy lock and the store lock while refreshing:
    # cache writers may require either one (NOTE(review): e.g. the manifest
    # cache writes under wlock — confirm for any newly added caches).
    with repo.wlock(), repo.lock():
        repo.updatecaches(full=True)
2786
2786
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
    ('', 'backup', True, _('keep the old repository content around')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.
    """
    # Thin CLI wrapper: all the actual work lives in the upgrade module.
    return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize,
                               backup=backup)
2813
2813
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    matched = list(repo[None].walk(m))
    if not matched:
        return
    # Optionally map OS-specific separators to '/' in the relative paths.
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        relpath = util.normpath
    else:
        relpath = lambda fn: fn
    # Column widths are sized to the longest absolute and relative paths.
    abswidth = max(len(abs) for abs in matched)
    relwidth = max(len(repo.pathto(abs)) for abs in matched)
    fmt = 'f %%-%ds %%-%ds %%s' % (abswidth, relwidth)
    for abs in matched:
        exact = m.exact(abs) and 'exact' or ''
        line = fmt % (abs, relpath(repo.pathto(abs)), exact)
        ui.write("%s\n" % line.rstrip())
2834
2834
@command('debugwhyunstable', [], _('REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    ctx = scmutil.revsingle(repo, rev)
    for entry in obsutil.whyunstable(repo, ctx):
        # When divergent nodes are involved, list them (with their phases)
        # between the instability name and the reason, followed by a space.
        if entry.get('divergentnodes'):
            parts = ['%s (%s)' % (c.hex(), c.phasestr())
                     for c in entry['divergentnodes']]
            dnodes = ' '.join(parts) + ' '
        else:
            dnodes = ''
        ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
                                    entry['reason'], entry['node']))
2845
2845
@command('debugwireargs',
    [('', 'three', '', 'three'),
     ('', 'four', '', 'four'),
     ('', 'five', '', 'five'),
     ] + cmdutil.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    # Drop the generic remote options; only truthy test options are
    # forwarded over the wire.
    for remoteopt in cmdutil.remoteopts:
        del opts[remoteopt[1]]
    args = pycompat.strkwargs(
        dict((k, v) for k, v in opts.iteritems() if v))
    # run twice to check that we don't mess up the stream for the next command
    res1 = peer.debugwireargs(*vals, **args)
    res2 = peer.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
2869
2869
2870 def _parsewirelangblocks(fh):
2870 def _parsewirelangblocks(fh):
2871 activeaction = None
2871 activeaction = None
2872 blocklines = []
2872 blocklines = []
2873 lastindent = 0
2873 lastindent = 0
2874
2874
2875 for line in fh:
2875 for line in fh:
2876 line = line.rstrip()
2876 line = line.rstrip()
2877 if not line:
2877 if not line:
2878 continue
2878 continue
2879
2879
2880 if line.startswith(b'#'):
2880 if line.startswith(b'#'):
2881 continue
2881 continue
2882
2882
2883 if not line.startswith(b' '):
2883 if not line.startswith(b' '):
2884 # New block. Flush previous one.
2884 # New block. Flush previous one.
2885 if activeaction:
2885 if activeaction:
2886 yield activeaction, blocklines
2886 yield activeaction, blocklines
2887
2887
2888 activeaction = line
2888 activeaction = line
2889 blocklines = []
2889 blocklines = []
2890 lastindent = 0
2890 lastindent = 0
2891 continue
2891 continue
2892
2892
2893 # Else we start with an indent.
2893 # Else we start with an indent.
2894
2894
2895 if not activeaction:
2895 if not activeaction:
2896 raise error.Abort(_('indented line outside of block'))
2896 raise error.Abort(_('indented line outside of block'))
2897
2897
2898 indent = len(line) - len(line.lstrip())
2898 indent = len(line) - len(line.lstrip())
2899
2899
2900 # If this line is indented more than the last line, concatenate it.
2900 # If this line is indented more than the last line, concatenate it.
2901 if indent > lastindent and blocklines:
2901 if indent > lastindent and blocklines:
2902 blocklines[-1] += line.lstrip()
2902 blocklines[-1] += line.lstrip()
2903 else:
2903 else:
2904 blocklines.append(line)
2904 blocklines.append(line)
2905 lastindent = indent
2905 lastindent = indent
2906
2906
2907 # Flush last block.
2907 # Flush last block.
2908 if activeaction:
2908 if activeaction:
2909 yield activeaction, blocklines
2909 yield activeaction, blocklines
2910
2910
2911 @command('debugwireproto',
2911 @command('debugwireproto',
2912 [
2912 [
2913 ('', 'localssh', False, _('start an SSH server for this repo')),
2913 ('', 'localssh', False, _('start an SSH server for this repo')),
2914 ('', 'peer', '', _('construct a specific version of the peer')),
2914 ('', 'peer', '', _('construct a specific version of the peer')),
2915 ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
2915 ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
2916 ('', 'nologhandshake', False,
2916 ('', 'nologhandshake', False,
2917 _('do not log I/O related to the peer handshake')),
2917 _('do not log I/O related to the peer handshake')),
2918 ] + cmdutil.remoteopts,
2918 ] + cmdutil.remoteopts,
2919 _('[PATH]'),
2919 _('[PATH]'),
2920 optionalrepo=True)
2920 optionalrepo=True)
2921 def debugwireproto(ui, repo, path=None, **opts):
2921 def debugwireproto(ui, repo, path=None, **opts):
2922 """send wire protocol commands to a server
2922 """send wire protocol commands to a server
2923
2923
2924 This command can be used to issue wire protocol commands to remote
2924 This command can be used to issue wire protocol commands to remote
2925 peers and to debug the raw data being exchanged.
2925 peers and to debug the raw data being exchanged.
2926
2926
2927 ``--localssh`` will start an SSH server against the current repository
2927 ``--localssh`` will start an SSH server against the current repository
2928 and connect to that. By default, the connection will perform a handshake
2928 and connect to that. By default, the connection will perform a handshake
2929 and establish an appropriate peer instance.
2929 and establish an appropriate peer instance.
2930
2930
2931 ``--peer`` can be used to bypass the handshake protocol and construct a
2931 ``--peer`` can be used to bypass the handshake protocol and construct a
2932 peer instance using the specified class type. Valid values are ``raw``,
2932 peer instance using the specified class type. Valid values are ``raw``,
2933 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
2933 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
2934 raw data payloads and don't support higher-level command actions.
2934 raw data payloads and don't support higher-level command actions.
2935
2935
2936 ``--noreadstderr`` can be used to disable automatic reading from stderr
2936 ``--noreadstderr`` can be used to disable automatic reading from stderr
2937 of the peer (for SSH connections only). Disabling automatic reading of
2937 of the peer (for SSH connections only). Disabling automatic reading of
2938 stderr is useful for making output more deterministic.
2938 stderr is useful for making output more deterministic.
2939
2939
2940 Commands are issued via a mini language which is specified via stdin.
2940 Commands are issued via a mini language which is specified via stdin.
2941 The language consists of individual actions to perform. An action is
2941 The language consists of individual actions to perform. An action is
2942 defined by a block. A block is defined as a line with no leading
2942 defined by a block. A block is defined as a line with no leading
2943 space followed by 0 or more lines with leading space. Blocks are
2943 space followed by 0 or more lines with leading space. Blocks are
2944 effectively a high-level command with additional metadata.
2944 effectively a high-level command with additional metadata.
2945
2945
2946 Lines beginning with ``#`` are ignored.
2946 Lines beginning with ``#`` are ignored.
2947
2947
2948 The following sections denote available actions.
2948 The following sections denote available actions.
2949
2949
2950 raw
2950 raw
2951 ---
2951 ---
2952
2952
2953 Send raw data to the server.
2953 Send raw data to the server.
2954
2954
2955 The block payload contains the raw data to send as one atomic send
2955 The block payload contains the raw data to send as one atomic send
2956 operation. The data may not actually be delivered in a single system
2956 operation. The data may not actually be delivered in a single system
2957 call: it depends on the abilities of the transport being used.
2957 call: it depends on the abilities of the transport being used.
2958
2958
2959 Each line in the block is de-indented and concatenated. Then, that
2959 Each line in the block is de-indented and concatenated. Then, that
2960 value is evaluated as a Python b'' literal. This allows the use of
2960 value is evaluated as a Python b'' literal. This allows the use of
2961 backslash escaping, etc.
2961 backslash escaping, etc.
2962
2962
2963 raw+
2963 raw+
2964 ----
2964 ----
2965
2965
2966 Behaves like ``raw`` except flushes output afterwards.
2966 Behaves like ``raw`` except flushes output afterwards.
2967
2967
2968 command <X>
2968 command <X>
2969 -----------
2969 -----------
2970
2970
2971 Send a request to run a named command, whose name follows the ``command``
2971 Send a request to run a named command, whose name follows the ``command``
2972 string.
2972 string.
2973
2973
2974 Arguments to the command are defined as lines in this block. The format of
2974 Arguments to the command are defined as lines in this block. The format of
2975 each line is ``<key> <value>``. e.g.::
2975 each line is ``<key> <value>``. e.g.::
2976
2976
2977 command listkeys
2977 command listkeys
2978 namespace bookmarks
2978 namespace bookmarks
2979
2979
2980 If the value begins with ``eval:``, it will be interpreted as a Python
2980 If the value begins with ``eval:``, it will be interpreted as a Python
2981 literal expression. Otherwise values are interpreted as Python b'' literals.
2981 literal expression. Otherwise values are interpreted as Python b'' literals.
2982 This allows sending complex types and encoding special byte sequences via
2982 This allows sending complex types and encoding special byte sequences via
2983 backslash escaping.
2983 backslash escaping.
2984
2984
2985 The following arguments have special meaning:
2985 The following arguments have special meaning:
2986
2986
2987 ``PUSHFILE``
2987 ``PUSHFILE``
2988 When defined, the *push* mechanism of the peer will be used instead
2988 When defined, the *push* mechanism of the peer will be used instead
2989 of the static request-response mechanism and the content of the
2989 of the static request-response mechanism and the content of the
2990 file specified in the value of this argument will be sent as the
2990 file specified in the value of this argument will be sent as the
2991 command payload.
2991 command payload.
2992
2992
2993 This can be used to submit a local bundle file to the remote.
2993 This can be used to submit a local bundle file to the remote.
2994
2994
2995 batchbegin
2995 batchbegin
2996 ----------
2996 ----------
2997
2997
2998 Instruct the peer to begin a batched send.
2998 Instruct the peer to begin a batched send.
2999
2999
3000 All ``command`` blocks are queued for execution until the next
3000 All ``command`` blocks are queued for execution until the next
3001 ``batchsubmit`` block.
3001 ``batchsubmit`` block.
3002
3002
3003 batchsubmit
3003 batchsubmit
3004 -----------
3004 -----------
3005
3005
3006 Submit previously queued ``command`` blocks as a batch request.
3006 Submit previously queued ``command`` blocks as a batch request.
3007
3007
3008 This action MUST be paired with a ``batchbegin`` action.
3008 This action MUST be paired with a ``batchbegin`` action.
3009
3009
3010 httprequest <method> <path>
3010 httprequest <method> <path>
3011 ---------------------------
3011 ---------------------------
3012
3012
3013 (HTTP peer only)
3013 (HTTP peer only)
3014
3014
3015 Send an HTTP request to the peer.
3015 Send an HTTP request to the peer.
3016
3016
3017 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
3017 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
3018
3018
3019 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
3019 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
3020 headers to add to the request. e.g. ``Accept: foo``.
3020 headers to add to the request. e.g. ``Accept: foo``.
3021
3021
3022 The following arguments are special:
3022 The following arguments are special:
3023
3023
3024 ``BODYFILE``
3024 ``BODYFILE``
3025 The content of the file defined as the value to this argument will be
3025 The content of the file defined as the value to this argument will be
3026 transferred verbatim as the HTTP request body.
3026 transferred verbatim as the HTTP request body.
3027
3027
3028 ``frame <type> <flags> <payload>``
3028 ``frame <type> <flags> <payload>``
3029 Send a unified protocol frame as part of the request body.
3029 Send a unified protocol frame as part of the request body.
3030
3030
3031 All frames will be collected and sent as the body to the HTTP
3031 All frames will be collected and sent as the body to the HTTP
3032 request.
3032 request.
3033
3033
3034 close
3034 close
3035 -----
3035 -----
3036
3036
3037 Close the connection to the server.
3037 Close the connection to the server.
3038
3038
3039 flush
3039 flush
3040 -----
3040 -----
3041
3041
3042 Flush data written to the server.
3042 Flush data written to the server.
3043
3043
3044 readavailable
3044 readavailable
3045 -------------
3045 -------------
3046
3046
3047 Close the write end of the connection and read all available data from
3047 Close the write end of the connection and read all available data from
3048 the server.
3048 the server.
3049
3049
3050 If the connection to the server encompasses multiple pipes, we poll both
3050 If the connection to the server encompasses multiple pipes, we poll both
3051 pipes and read available data.
3051 pipes and read available data.
3052
3052
3053 readline
3053 readline
3054 --------
3054 --------
3055
3055
3056 Read a line of output from the server. If there are multiple output
3056 Read a line of output from the server. If there are multiple output
3057 pipes, reads only the main pipe.
3057 pipes, reads only the main pipe.
3058
3058
3059 ereadline
3059 ereadline
3060 ---------
3060 ---------
3061
3061
3062 Like ``readline``, but read from the stderr pipe, if available.
3062 Like ``readline``, but read from the stderr pipe, if available.
3063
3063
3064 read <X>
3064 read <X>
3065 --------
3065 --------
3066
3066
3067 ``read()`` N bytes from the server's main output pipe.
3067 ``read()`` N bytes from the server's main output pipe.
3068
3068
3069 eread <X>
3069 eread <X>
3070 ---------
3070 ---------
3071
3071
3072 ``read()`` N bytes from the server's stderr pipe, if available.
3072 ``read()`` N bytes from the server's stderr pipe, if available.
3073
3073
3074 Specifying Unified Frame-Based Protocol Frames
3074 Specifying Unified Frame-Based Protocol Frames
3075 ----------------------------------------------
3075 ----------------------------------------------
3076
3076
3077 It is possible to emit a *Unified Frame-Based Protocol* by using special
3077 It is possible to emit a *Unified Frame-Based Protocol* by using special
3078 syntax.
3078 syntax.
3079
3079
3080 A frame is composed as a type, flags, and payload. These can be parsed
3080 A frame is composed as a type, flags, and payload. These can be parsed
3081 from a string of the form:
3081 from a string of the form:
3082
3082
3083 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3083 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3084
3084
3085 ``request-id`` and ``stream-id`` are integers defining the request and
3085 ``request-id`` and ``stream-id`` are integers defining the request and
3086 stream identifiers.
3086 stream identifiers.
3087
3087
3088 ``type`` can be an integer value for the frame type or the string name
3088 ``type`` can be an integer value for the frame type or the string name
3089 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3089 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3090 ``command-name``.
3090 ``command-name``.
3091
3091
3092 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
3092 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
3093 components. Each component (and there can be just one) can be an integer
3093 components. Each component (and there can be just one) can be an integer
3094 or a flag name for stream flags or frame flags, respectively. Values are
3094 or a flag name for stream flags or frame flags, respectively. Values are
3095 resolved to integers and then bitwise OR'd together.
3095 resolved to integers and then bitwise OR'd together.
3096
3096
3097 ``payload`` represents the raw frame payload. If it begins with
3097 ``payload`` represents the raw frame payload. If it begins with
3098 ``cbor:``, the following string is evaluated as Python code and the
3098 ``cbor:``, the following string is evaluated as Python code and the
3099 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3099 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3100 as a Python byte string literal.
3100 as a Python byte string literal.
3101 """
3101 """
3102 opts = pycompat.byteskwargs(opts)
3102 opts = pycompat.byteskwargs(opts)
3103
3103
3104 if opts['localssh'] and not repo:
3104 if opts['localssh'] and not repo:
3105 raise error.Abort(_('--localssh requires a repository'))
3105 raise error.Abort(_('--localssh requires a repository'))
3106
3106
3107 if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
3107 if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
3108 raise error.Abort(_('invalid value for --peer'),
3108 raise error.Abort(_('invalid value for --peer'),
3109 hint=_('valid values are "raw", "ssh1", and "ssh2"'))
3109 hint=_('valid values are "raw", "ssh1", and "ssh2"'))
3110
3110
3111 if path and opts['localssh']:
3111 if path and opts['localssh']:
3112 raise error.Abort(_('cannot specify --localssh with an explicit '
3112 raise error.Abort(_('cannot specify --localssh with an explicit '
3113 'path'))
3113 'path'))
3114
3114
3115 if ui.interactive():
3115 if ui.interactive():
3116 ui.write(_('(waiting for commands on stdin)\n'))
3116 ui.write(_('(waiting for commands on stdin)\n'))
3117
3117
3118 blocks = list(_parsewirelangblocks(ui.fin))
3118 blocks = list(_parsewirelangblocks(ui.fin))
3119
3119
3120 proc = None
3120 proc = None
3121 stdin = None
3121 stdin = None
3122 stdout = None
3122 stdout = None
3123 stderr = None
3123 stderr = None
3124 opener = None
3124 opener = None
3125
3125
3126 if opts['localssh']:
3126 if opts['localssh']:
3127 # We start the SSH server in its own process so there is process
3127 # We start the SSH server in its own process so there is process
3128 # separation. This prevents a whole class of potential bugs around
3128 # separation. This prevents a whole class of potential bugs around
3129 # shared state from interfering with server operation.
3129 # shared state from interfering with server operation.
3130 args = procutil.hgcmd() + [
3130 args = procutil.hgcmd() + [
3131 '-R', repo.root,
3131 '-R', repo.root,
3132 'debugserve', '--sshstdio',
3132 'debugserve', '--sshstdio',
3133 ]
3133 ]
3134 proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
3134 proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
3135 stdin=subprocess.PIPE,
3135 stdin=subprocess.PIPE,
3136 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
3136 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
3137 bufsize=0)
3137 bufsize=0)
3138
3138
3139 stdin = proc.stdin
3139 stdin = proc.stdin
3140 stdout = proc.stdout
3140 stdout = proc.stdout
3141 stderr = proc.stderr
3141 stderr = proc.stderr
3142
3142
3143 # We turn the pipes into observers so we can log I/O.
3143 # We turn the pipes into observers so we can log I/O.
3144 if ui.verbose or opts['peer'] == 'raw':
3144 if ui.verbose or opts['peer'] == 'raw':
3145 stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
3145 stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
3146 logdata=True)
3146 logdata=True)
3147 stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
3147 stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
3148 logdata=True)
3148 logdata=True)
3149 stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
3149 stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
3150 logdata=True)
3150 logdata=True)
3151
3151
3152 # --localssh also implies the peer connection settings.
3152 # --localssh also implies the peer connection settings.
3153
3153
3154 url = 'ssh://localserver'
3154 url = 'ssh://localserver'
3155 autoreadstderr = not opts['noreadstderr']
3155 autoreadstderr = not opts['noreadstderr']
3156
3156
3157 if opts['peer'] == 'ssh1':
3157 if opts['peer'] == 'ssh1':
3158 ui.write(_('creating ssh peer for wire protocol version 1\n'))
3158 ui.write(_('creating ssh peer for wire protocol version 1\n'))
3159 peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
3159 peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
3160 None, autoreadstderr=autoreadstderr)
3160 None, autoreadstderr=autoreadstderr)
3161 elif opts['peer'] == 'ssh2':
3161 elif opts['peer'] == 'ssh2':
3162 ui.write(_('creating ssh peer for wire protocol version 2\n'))
3162 ui.write(_('creating ssh peer for wire protocol version 2\n'))
3163 peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
3163 peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
3164 None, autoreadstderr=autoreadstderr)
3164 None, autoreadstderr=autoreadstderr)
3165 elif opts['peer'] == 'raw':
3165 elif opts['peer'] == 'raw':
3166 ui.write(_('using raw connection to peer\n'))
3166 ui.write(_('using raw connection to peer\n'))
3167 peer = None
3167 peer = None
3168 else:
3168 else:
3169 ui.write(_('creating ssh peer from handshake results\n'))
3169 ui.write(_('creating ssh peer from handshake results\n'))
3170 peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
3170 peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
3171 autoreadstderr=autoreadstderr)
3171 autoreadstderr=autoreadstderr)
3172
3172
3173 elif path:
3173 elif path:
3174 # We bypass hg.peer() so we can proxy the sockets.
3174 # We bypass hg.peer() so we can proxy the sockets.
3175 # TODO consider not doing this because we skip
3175 # TODO consider not doing this because we skip
3176 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
3176 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
3177 u = util.url(path)
3177 u = util.url(path)
3178 if u.scheme != 'http':
3178 if u.scheme != 'http':
3179 raise error.Abort(_('only http:// paths are currently supported'))
3179 raise error.Abort(_('only http:// paths are currently supported'))
3180
3180
3181 url, authinfo = u.authinfo()
3181 url, authinfo = u.authinfo()
3182 openerargs = {
3182 openerargs = {
3183 r'useragent': b'Mercurial debugwireproto',
3183 r'useragent': b'Mercurial debugwireproto',
3184 }
3184 }
3185
3185
3186 # Turn pipes/sockets into observers so we can log I/O.
3186 # Turn pipes/sockets into observers so we can log I/O.
3187 if ui.verbose:
3187 if ui.verbose:
3188 openerargs.update({
3188 openerargs.update({
3189 r'loggingfh': ui,
3189 r'loggingfh': ui,
3190 r'loggingname': b's',
3190 r'loggingname': b's',
3191 r'loggingopts': {
3191 r'loggingopts': {
3192 r'logdata': True,
3192 r'logdata': True,
3193 r'logdataapis': False,
3193 r'logdataapis': False,
3194 },
3194 },
3195 })
3195 })
3196
3196
3197 if ui.debugflag:
3197 if ui.debugflag:
3198 openerargs[r'loggingopts'][r'logdataapis'] = True
3198 openerargs[r'loggingopts'][r'logdataapis'] = True
3199
3199
3200 # Don't send default headers when in raw mode. This allows us to
3200 # Don't send default headers when in raw mode. This allows us to
3201 # bypass most of the behavior of our URL handling code so we can
3201 # bypass most of the behavior of our URL handling code so we can
3202 # have near complete control over what's sent on the wire.
3202 # have near complete control over what's sent on the wire.
3203 if opts['peer'] == 'raw':
3203 if opts['peer'] == 'raw':
3204 openerargs[r'sendaccept'] = False
3204 openerargs[r'sendaccept'] = False
3205
3205
3206 opener = urlmod.opener(ui, authinfo, **openerargs)
3206 opener = urlmod.opener(ui, authinfo, **openerargs)
3207
3207
3208 if opts['peer'] == 'http2':
3208 if opts['peer'] == 'http2':
3209 ui.write(_('creating http peer for wire protocol version 2\n'))
3209 ui.write(_('creating http peer for wire protocol version 2\n'))
3210 # We go through makepeer() because we need an API descriptor for
3210 # We go through makepeer() because we need an API descriptor for
3211 # the peer instance to be useful.
3211 # the peer instance to be useful.
3212 with ui.configoverride({
3212 with ui.configoverride({
3213 ('experimental', 'httppeer.advertise-v2'): True}):
3213 ('experimental', 'httppeer.advertise-v2'): True}):
3214 if opts['nologhandshake']:
3214 if opts['nologhandshake']:
3215 ui.pushbuffer()
3215 ui.pushbuffer()
3216
3216
3217 peer = httppeer.makepeer(ui, path, opener=opener)
3217 peer = httppeer.makepeer(ui, path, opener=opener)
3218
3218
3219 if opts['nologhandshake']:
3219 if opts['nologhandshake']:
3220 ui.popbuffer()
3220 ui.popbuffer()
3221
3221
3222 if not isinstance(peer, httppeer.httpv2peer):
3222 if not isinstance(peer, httppeer.httpv2peer):
3223 raise error.Abort(_('could not instantiate HTTP peer for '
3223 raise error.Abort(_('could not instantiate HTTP peer for '
3224 'wire protocol version 2'),
3224 'wire protocol version 2'),
3225 hint=_('the server may not have the feature '
3225 hint=_('the server may not have the feature '
3226 'enabled or is not allowing this '
3226 'enabled or is not allowing this '
3227 'client version'))
3227 'client version'))
3228
3228
3229 elif opts['peer'] == 'raw':
3229 elif opts['peer'] == 'raw':
3230 ui.write(_('using raw connection to peer\n'))
3230 ui.write(_('using raw connection to peer\n'))
3231 peer = None
3231 peer = None
3232 elif opts['peer']:
3232 elif opts['peer']:
3233 raise error.Abort(_('--peer %s not supported with HTTP peers') %
3233 raise error.Abort(_('--peer %s not supported with HTTP peers') %
3234 opts['peer'])
3234 opts['peer'])
3235 else:
3235 else:
3236 peer = httppeer.makepeer(ui, path, opener=opener)
3236 peer = httppeer.makepeer(ui, path, opener=opener)
3237
3237
3238 # We /could/ populate stdin/stdout with sock.makefile()...
3238 # We /could/ populate stdin/stdout with sock.makefile()...
3239 else:
3239 else:
3240 raise error.Abort(_('unsupported connection configuration'))
3240 raise error.Abort(_('unsupported connection configuration'))
3241
3241
3242 batchedcommands = None
3242 batchedcommands = None
3243
3243
3244 # Now perform actions based on the parsed wire language instructions.
3244 # Now perform actions based on the parsed wire language instructions.
3245 for action, lines in blocks:
3245 for action, lines in blocks:
3246 if action in ('raw', 'raw+'):
3246 if action in ('raw', 'raw+'):
3247 if not stdin:
3247 if not stdin:
3248 raise error.Abort(_('cannot call raw/raw+ on this peer'))
3248 raise error.Abort(_('cannot call raw/raw+ on this peer'))
3249
3249
3250 # Concatenate the data together.
3250 # Concatenate the data together.
3251 data = ''.join(l.lstrip() for l in lines)
3251 data = ''.join(l.lstrip() for l in lines)
3252 data = stringutil.unescapestr(data)
3252 data = stringutil.unescapestr(data)
3253 stdin.write(data)
3253 stdin.write(data)
3254
3254
3255 if action == 'raw+':
3255 if action == 'raw+':
3256 stdin.flush()
3256 stdin.flush()
3257 elif action == 'flush':
3257 elif action == 'flush':
3258 if not stdin:
3258 if not stdin:
3259 raise error.Abort(_('cannot call flush on this peer'))
3259 raise error.Abort(_('cannot call flush on this peer'))
3260 stdin.flush()
3260 stdin.flush()
3261 elif action.startswith('command'):
3261 elif action.startswith('command'):
3262 if not peer:
3262 if not peer:
3263 raise error.Abort(_('cannot send commands unless peer instance '
3263 raise error.Abort(_('cannot send commands unless peer instance '
3264 'is available'))
3264 'is available'))
3265
3265
3266 command = action.split(' ', 1)[1]
3266 command = action.split(' ', 1)[1]
3267
3267
3268 args = {}
3268 args = {}
3269 for line in lines:
3269 for line in lines:
3270 # We need to allow empty values.
3270 # We need to allow empty values.
3271 fields = line.lstrip().split(' ', 1)
3271 fields = line.lstrip().split(' ', 1)
3272 if len(fields) == 1:
3272 if len(fields) == 1:
3273 key = fields[0]
3273 key = fields[0]
3274 value = ''
3274 value = ''
3275 else:
3275 else:
3276 key, value = fields
3276 key, value = fields
3277
3277
3278 if value.startswith('eval:'):
3278 if value.startswith('eval:'):
3279 value = stringutil.evalpythonliteral(value[5:])
3279 value = stringutil.evalpythonliteral(value[5:])
3280 else:
3280 else:
3281 value = stringutil.unescapestr(value)
3281 value = stringutil.unescapestr(value)
3282
3282
3283 args[key] = value
3283 args[key] = value
3284
3284
3285 if batchedcommands is not None:
3285 if batchedcommands is not None:
3286 batchedcommands.append((command, args))
3286 batchedcommands.append((command, args))
3287 continue
3287 continue
3288
3288
3289 ui.status(_('sending %s command\n') % command)
3289 ui.status(_('sending %s command\n') % command)
3290
3290
3291 if 'PUSHFILE' in args:
3291 if 'PUSHFILE' in args:
3292 with open(args['PUSHFILE'], r'rb') as fh:
3292 with open(args['PUSHFILE'], r'rb') as fh:
3293 del args['PUSHFILE']
3293 del args['PUSHFILE']
3294 res, output = peer._callpush(command, fh,
3294 res, output = peer._callpush(command, fh,
3295 **pycompat.strkwargs(args))
3295 **pycompat.strkwargs(args))
3296 ui.status(_('result: %s\n') % stringutil.escapestr(res))
3296 ui.status(_('result: %s\n') % stringutil.escapestr(res))
3297 ui.status(_('remote output: %s\n') %
3297 ui.status(_('remote output: %s\n') %
3298 stringutil.escapestr(output))
3298 stringutil.escapestr(output))
3299 else:
3299 else:
3300 with peer.commandexecutor() as e:
3300 with peer.commandexecutor() as e:
3301 res = e.callcommand(command, args).result()
3301 res = e.callcommand(command, args).result()
3302
3302
3303 if isinstance(res, wireprotov2peer.commandresponse):
3303 if isinstance(res, wireprotov2peer.commandresponse):
3304 val = res.objects()
3304 val = res.objects()
3305 ui.status(_('response: %s\n') %
3305 ui.status(_('response: %s\n') %
3306 stringutil.pprint(val, bprefix=True, indent=2))
3306 stringutil.pprint(val, bprefix=True, indent=2))
3307 else:
3307 else:
3308 ui.status(_('response: %s\n') %
3308 ui.status(_('response: %s\n') %
3309 stringutil.pprint(res, bprefix=True, indent=2))
3309 stringutil.pprint(res, bprefix=True, indent=2))
3310
3310
3311 elif action == 'batchbegin':
3311 elif action == 'batchbegin':
3312 if batchedcommands is not None:
3312 if batchedcommands is not None:
3313 raise error.Abort(_('nested batchbegin not allowed'))
3313 raise error.Abort(_('nested batchbegin not allowed'))
3314
3314
3315 batchedcommands = []
3315 batchedcommands = []
3316 elif action == 'batchsubmit':
3316 elif action == 'batchsubmit':
3317 # There is a batching API we could go through. But it would be
3317 # There is a batching API we could go through. But it would be
3318 # difficult to normalize requests into function calls. It is easier
3318 # difficult to normalize requests into function calls. It is easier
3319 # to bypass this layer and normalize to commands + args.
3319 # to bypass this layer and normalize to commands + args.
3320 ui.status(_('sending batch with %d sub-commands\n') %
3320 ui.status(_('sending batch with %d sub-commands\n') %
3321 len(batchedcommands))
3321 len(batchedcommands))
3322 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
3322 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
3323 ui.status(_('response #%d: %s\n') %
3323 ui.status(_('response #%d: %s\n') %
3324 (i, stringutil.escapestr(chunk)))
3324 (i, stringutil.escapestr(chunk)))
3325
3325
3326 batchedcommands = None
3326 batchedcommands = None
3327
3327
3328 elif action.startswith('httprequest '):
3328 elif action.startswith('httprequest '):
3329 if not opener:
3329 if not opener:
3330 raise error.Abort(_('cannot use httprequest without an HTTP '
3330 raise error.Abort(_('cannot use httprequest without an HTTP '
3331 'peer'))
3331 'peer'))
3332
3332
3333 request = action.split(' ', 2)
3333 request = action.split(' ', 2)
3334 if len(request) != 3:
3334 if len(request) != 3:
3335 raise error.Abort(_('invalid httprequest: expected format is '
3335 raise error.Abort(_('invalid httprequest: expected format is '
3336 '"httprequest <method> <path>'))
3336 '"httprequest <method> <path>'))
3337
3337
3338 method, httppath = request[1:]
3338 method, httppath = request[1:]
3339 headers = {}
3339 headers = {}
3340 body = None
3340 body = None
3341 frames = []
3341 frames = []
3342 for line in lines:
3342 for line in lines:
3343 line = line.lstrip()
3343 line = line.lstrip()
3344 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
3344 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
3345 if m:
3345 if m:
3346 # Headers need to use native strings.
3346 # Headers need to use native strings.
3347 key = pycompat.strurl(m.group(1))
3347 key = pycompat.strurl(m.group(1))
3348 value = pycompat.strurl(m.group(2))
3348 value = pycompat.strurl(m.group(2))
3349 headers[key] = value
3349 headers[key] = value
3350 continue
3350 continue
3351
3351
3352 if line.startswith(b'BODYFILE '):
3352 if line.startswith(b'BODYFILE '):
3353 with open(line.split(b' ', 1), 'rb') as fh:
3353 with open(line.split(b' ', 1), 'rb') as fh:
3354 body = fh.read()
3354 body = fh.read()
3355 elif line.startswith(b'frame '):
3355 elif line.startswith(b'frame '):
3356 frame = wireprotoframing.makeframefromhumanstring(
3356 frame = wireprotoframing.makeframefromhumanstring(
3357 line[len(b'frame '):])
3357 line[len(b'frame '):])
3358
3358
3359 frames.append(frame)
3359 frames.append(frame)
3360 else:
3360 else:
3361 raise error.Abort(_('unknown argument to httprequest: %s') %
3361 raise error.Abort(_('unknown argument to httprequest: %s') %
3362 line)
3362 line)
3363
3363
3364 url = path + httppath
3364 url = path + httppath
3365
3365
3366 if frames:
3366 if frames:
3367 body = b''.join(bytes(f) for f in frames)
3367 body = b''.join(bytes(f) for f in frames)
3368
3368
3369 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
3369 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
3370
3370
3371 # urllib.Request insists on using has_data() as a proxy for
3371 # urllib.Request insists on using has_data() as a proxy for
3372 # determining the request method. Override that to use our
3372 # determining the request method. Override that to use our
3373 # explicitly requested method.
3373 # explicitly requested method.
3374 req.get_method = lambda: pycompat.sysstr(method)
3374 req.get_method = lambda: pycompat.sysstr(method)
3375
3375
3376 try:
3376 try:
3377 res = opener.open(req)
3377 res = opener.open(req)
3378 body = res.read()
3378 body = res.read()
3379 except util.urlerr.urlerror as e:
3379 except util.urlerr.urlerror as e:
3380 # read() method must be called, but only exists in Python 2
3380 # read() method must be called, but only exists in Python 2
3381 getattr(e, 'read', lambda: None)()
3381 getattr(e, 'read', lambda: None)()
3382 continue
3382 continue
3383
3383
3384 ct = res.headers.get(r'Content-Type')
3384 ct = res.headers.get(r'Content-Type')
3385 if ct == r'application/mercurial-cbor':
3385 if ct == r'application/mercurial-cbor':
3386 ui.write(_('cbor> %s\n') %
3386 ui.write(_('cbor> %s\n') %
3387 stringutil.pprint(cborutil.decodeall(body),
3387 stringutil.pprint(cborutil.decodeall(body),
3388 bprefix=True,
3388 bprefix=True,
3389 indent=2))
3389 indent=2))
3390
3390
3391 elif action == 'close':
3391 elif action == 'close':
3392 peer.close()
3392 peer.close()
3393 elif action == 'readavailable':
3393 elif action == 'readavailable':
3394 if not stdout or not stderr:
3394 if not stdout or not stderr:
3395 raise error.Abort(_('readavailable not available on this peer'))
3395 raise error.Abort(_('readavailable not available on this peer'))
3396
3396
3397 stdin.close()
3397 stdin.close()
3398 stdout.read()
3398 stdout.read()
3399 stderr.read()
3399 stderr.read()
3400
3400
3401 elif action == 'readline':
3401 elif action == 'readline':
3402 if not stdout:
3402 if not stdout:
3403 raise error.Abort(_('readline not available on this peer'))
3403 raise error.Abort(_('readline not available on this peer'))
3404 stdout.readline()
3404 stdout.readline()
3405 elif action == 'ereadline':
3405 elif action == 'ereadline':
3406 if not stderr:
3406 if not stderr:
3407 raise error.Abort(_('ereadline not available on this peer'))
3407 raise error.Abort(_('ereadline not available on this peer'))
3408 stderr.readline()
3408 stderr.readline()
3409 elif action.startswith('read '):
3409 elif action.startswith('read '):
3410 count = int(action.split(' ', 1)[1])
3410 count = int(action.split(' ', 1)[1])
3411 if not stdout:
3411 if not stdout:
3412 raise error.Abort(_('read not available on this peer'))
3412 raise error.Abort(_('read not available on this peer'))
3413 stdout.read(count)
3413 stdout.read(count)
3414 elif action.startswith('eread '):
3414 elif action.startswith('eread '):
3415 count = int(action.split(' ', 1)[1])
3415 count = int(action.split(' ', 1)[1])
3416 if not stderr:
3416 if not stderr:
3417 raise error.Abort(_('eread not available on this peer'))
3417 raise error.Abort(_('eread not available on this peer'))
3418 stderr.read(count)
3418 stderr.read(count)
3419 else:
3419 else:
3420 raise error.Abort(_('unknown action: %s') % action)
3420 raise error.Abort(_('unknown action: %s') % action)
3421
3421
3422 if batchedcommands is not None:
3422 if batchedcommands is not None:
3423 raise error.Abort(_('unclosed "batchbegin" request'))
3423 raise error.Abort(_('unclosed "batchbegin" request'))
3424
3424
3425 if peer:
3425 if peer:
3426 peer.close()
3426 peer.close()
3427
3427
3428 if proc:
3428 if proc:
3429 proc.kill()
3429 proc.kill()
@@ -1,2055 +1,2055 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import itertools
11 import itertools
12 import struct
12 import struct
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from . import (
22 from . import (
23 error,
23 error,
24 mdiff,
24 mdiff,
25 policy,
25 policy,
26 pycompat,
26 pycompat,
27 repository,
27 repository,
28 revlog,
28 revlog,
29 util,
29 util,
30 )
30 )
31 from .utils import (
31 from .utils import (
32 interfaceutil,
32 interfaceutil,
33 )
33 )
34
34
35 parsers = policy.importmod(r'parsers')
35 parsers = policy.importmod(r'parsers')
36 propertycache = util.propertycache
36 propertycache = util.propertycache
37
37
38 def _parse(data):
38 def _parse(data):
39 # This method does a little bit of excessive-looking
39 # This method does a little bit of excessive-looking
40 # precondition checking. This is so that the behavior of this
40 # precondition checking. This is so that the behavior of this
41 # class exactly matches its C counterpart to try and help
41 # class exactly matches its C counterpart to try and help
42 # prevent surprise breakage for anyone that develops against
42 # prevent surprise breakage for anyone that develops against
43 # the pure version.
43 # the pure version.
44 if data and data[-1:] != '\n':
44 if data and data[-1:] != '\n':
45 raise ValueError('Manifest did not end in a newline.')
45 raise ValueError('Manifest did not end in a newline.')
46 prev = None
46 prev = None
47 for l in data.splitlines():
47 for l in data.splitlines():
48 if prev is not None and prev > l:
48 if prev is not None and prev > l:
49 raise ValueError('Manifest lines not in sorted order.')
49 raise ValueError('Manifest lines not in sorted order.')
50 prev = l
50 prev = l
51 f, n = l.split('\0')
51 f, n = l.split('\0')
52 if len(n) > 40:
52 if len(n) > 40:
53 yield f, bin(n[:40]), n[40:]
53 yield f, bin(n[:40]), n[40:]
54 else:
54 else:
55 yield f, bin(n), ''
55 yield f, bin(n), ''
56
56
57 def _text(it):
57 def _text(it):
58 files = []
58 files = []
59 lines = []
59 lines = []
60 for f, n, fl in it:
60 for f, n, fl in it:
61 files.append(f)
61 files.append(f)
62 # if this is changed to support newlines in filenames,
62 # if this is changed to support newlines in filenames,
63 # be sure to check the templates/ dir again (especially *-raw.tmpl)
63 # be sure to check the templates/ dir again (especially *-raw.tmpl)
64 lines.append("%s\0%s%s\n" % (f, hex(n), fl))
64 lines.append("%s\0%s%s\n" % (f, hex(n), fl))
65
65
66 _checkforbidden(files)
66 _checkforbidden(files)
67 return ''.join(lines)
67 return ''.join(lines)
68
68
69 class lazymanifestiter(object):
69 class lazymanifestiter(object):
70 def __init__(self, lm):
70 def __init__(self, lm):
71 self.pos = 0
71 self.pos = 0
72 self.lm = lm
72 self.lm = lm
73
73
74 def __iter__(self):
74 def __iter__(self):
75 return self
75 return self
76
76
77 def next(self):
77 def next(self):
78 try:
78 try:
79 data, pos = self.lm._get(self.pos)
79 data, pos = self.lm._get(self.pos)
80 except IndexError:
80 except IndexError:
81 raise StopIteration
81 raise StopIteration
82 if pos == -1:
82 if pos == -1:
83 self.pos += 1
83 self.pos += 1
84 return data[0]
84 return data[0]
85 self.pos += 1
85 self.pos += 1
86 zeropos = data.find('\x00', pos)
86 zeropos = data.find('\x00', pos)
87 return data[pos:zeropos]
87 return data[pos:zeropos]
88
88
89 __next__ = next
89 __next__ = next
90
90
91 class lazymanifestiterentries(object):
91 class lazymanifestiterentries(object):
92 def __init__(self, lm):
92 def __init__(self, lm):
93 self.lm = lm
93 self.lm = lm
94 self.pos = 0
94 self.pos = 0
95
95
96 def __iter__(self):
96 def __iter__(self):
97 return self
97 return self
98
98
99 def next(self):
99 def next(self):
100 try:
100 try:
101 data, pos = self.lm._get(self.pos)
101 data, pos = self.lm._get(self.pos)
102 except IndexError:
102 except IndexError:
103 raise StopIteration
103 raise StopIteration
104 if pos == -1:
104 if pos == -1:
105 self.pos += 1
105 self.pos += 1
106 return data
106 return data
107 zeropos = data.find('\x00', pos)
107 zeropos = data.find('\x00', pos)
108 hashval = unhexlify(data, self.lm.extrainfo[self.pos],
108 hashval = unhexlify(data, self.lm.extrainfo[self.pos],
109 zeropos + 1, 40)
109 zeropos + 1, 40)
110 flags = self.lm._getflags(data, self.pos, zeropos)
110 flags = self.lm._getflags(data, self.pos, zeropos)
111 self.pos += 1
111 self.pos += 1
112 return (data[pos:zeropos], hashval, flags)
112 return (data[pos:zeropos], hashval, flags)
113
113
114 __next__ = next
114 __next__ = next
115
115
116 def unhexlify(data, extra, pos, length):
116 def unhexlify(data, extra, pos, length):
117 s = bin(data[pos:pos + length])
117 s = bin(data[pos:pos + length])
118 if extra:
118 if extra:
119 s += chr(extra & 0xff)
119 s += chr(extra & 0xff)
120 return s
120 return s
121
121
122 def _cmp(a, b):
122 def _cmp(a, b):
123 return (a > b) - (a < b)
123 return (a > b) - (a < b)
124
124
125 class _lazymanifest(object):
125 class _lazymanifest(object):
126 def __init__(self, data, positions=None, extrainfo=None, extradata=None):
126 def __init__(self, data, positions=None, extrainfo=None, extradata=None):
127 if positions is None:
127 if positions is None:
128 self.positions = self.findlines(data)
128 self.positions = self.findlines(data)
129 self.extrainfo = [0] * len(self.positions)
129 self.extrainfo = [0] * len(self.positions)
130 self.data = data
130 self.data = data
131 self.extradata = []
131 self.extradata = []
132 else:
132 else:
133 self.positions = positions[:]
133 self.positions = positions[:]
134 self.extrainfo = extrainfo[:]
134 self.extrainfo = extrainfo[:]
135 self.extradata = extradata[:]
135 self.extradata = extradata[:]
136 self.data = data
136 self.data = data
137
137
138 def findlines(self, data):
138 def findlines(self, data):
139 if not data:
139 if not data:
140 return []
140 return []
141 pos = data.find("\n")
141 pos = data.find("\n")
142 if pos == -1 or data[-1:] != '\n':
142 if pos == -1 or data[-1:] != '\n':
143 raise ValueError("Manifest did not end in a newline.")
143 raise ValueError("Manifest did not end in a newline.")
144 positions = [0]
144 positions = [0]
145 prev = data[:data.find('\x00')]
145 prev = data[:data.find('\x00')]
146 while pos < len(data) - 1 and pos != -1:
146 while pos < len(data) - 1 and pos != -1:
147 positions.append(pos + 1)
147 positions.append(pos + 1)
148 nexts = data[pos + 1:data.find('\x00', pos + 1)]
148 nexts = data[pos + 1:data.find('\x00', pos + 1)]
149 if nexts < prev:
149 if nexts < prev:
150 raise ValueError("Manifest lines not in sorted order.")
150 raise ValueError("Manifest lines not in sorted order.")
151 prev = nexts
151 prev = nexts
152 pos = data.find("\n", pos + 1)
152 pos = data.find("\n", pos + 1)
153 return positions
153 return positions
154
154
155 def _get(self, index):
155 def _get(self, index):
156 # get the position encoded in pos:
156 # get the position encoded in pos:
157 # positive number is an index in 'data'
157 # positive number is an index in 'data'
158 # negative number is in extrapieces
158 # negative number is in extrapieces
159 pos = self.positions[index]
159 pos = self.positions[index]
160 if pos >= 0:
160 if pos >= 0:
161 return self.data, pos
161 return self.data, pos
162 return self.extradata[-pos - 1], -1
162 return self.extradata[-pos - 1], -1
163
163
164 def _getkey(self, pos):
164 def _getkey(self, pos):
165 if pos >= 0:
165 if pos >= 0:
166 return self.data[pos:self.data.find('\x00', pos + 1)]
166 return self.data[pos:self.data.find('\x00', pos + 1)]
167 return self.extradata[-pos - 1][0]
167 return self.extradata[-pos - 1][0]
168
168
169 def bsearch(self, key):
169 def bsearch(self, key):
170 first = 0
170 first = 0
171 last = len(self.positions) - 1
171 last = len(self.positions) - 1
172
172
173 while first <= last:
173 while first <= last:
174 midpoint = (first + last)//2
174 midpoint = (first + last)//2
175 nextpos = self.positions[midpoint]
175 nextpos = self.positions[midpoint]
176 candidate = self._getkey(nextpos)
176 candidate = self._getkey(nextpos)
177 r = _cmp(key, candidate)
177 r = _cmp(key, candidate)
178 if r == 0:
178 if r == 0:
179 return midpoint
179 return midpoint
180 else:
180 else:
181 if r < 0:
181 if r < 0:
182 last = midpoint - 1
182 last = midpoint - 1
183 else:
183 else:
184 first = midpoint + 1
184 first = midpoint + 1
185 return -1
185 return -1
186
186
187 def bsearch2(self, key):
187 def bsearch2(self, key):
188 # same as the above, but will always return the position
188 # same as the above, but will always return the position
189 # done for performance reasons
189 # done for performance reasons
190 first = 0
190 first = 0
191 last = len(self.positions) - 1
191 last = len(self.positions) - 1
192
192
193 while first <= last:
193 while first <= last:
194 midpoint = (first + last)//2
194 midpoint = (first + last)//2
195 nextpos = self.positions[midpoint]
195 nextpos = self.positions[midpoint]
196 candidate = self._getkey(nextpos)
196 candidate = self._getkey(nextpos)
197 r = _cmp(key, candidate)
197 r = _cmp(key, candidate)
198 if r == 0:
198 if r == 0:
199 return (midpoint, True)
199 return (midpoint, True)
200 else:
200 else:
201 if r < 0:
201 if r < 0:
202 last = midpoint - 1
202 last = midpoint - 1
203 else:
203 else:
204 first = midpoint + 1
204 first = midpoint + 1
205 return (first, False)
205 return (first, False)
206
206
207 def __contains__(self, key):
207 def __contains__(self, key):
208 return self.bsearch(key) != -1
208 return self.bsearch(key) != -1
209
209
210 def _getflags(self, data, needle, pos):
210 def _getflags(self, data, needle, pos):
211 start = pos + 41
211 start = pos + 41
212 end = data.find("\n", start)
212 end = data.find("\n", start)
213 if end == -1:
213 if end == -1:
214 end = len(data) - 1
214 end = len(data) - 1
215 if start == end:
215 if start == end:
216 return ''
216 return ''
217 return self.data[start:end]
217 return self.data[start:end]
218
218
219 def __getitem__(self, key):
219 def __getitem__(self, key):
220 if not isinstance(key, bytes):
220 if not isinstance(key, bytes):
221 raise TypeError("getitem: manifest keys must be a bytes.")
221 raise TypeError("getitem: manifest keys must be a bytes.")
222 needle = self.bsearch(key)
222 needle = self.bsearch(key)
223 if needle == -1:
223 if needle == -1:
224 raise KeyError
224 raise KeyError
225 data, pos = self._get(needle)
225 data, pos = self._get(needle)
226 if pos == -1:
226 if pos == -1:
227 return (data[1], data[2])
227 return (data[1], data[2])
228 zeropos = data.find('\x00', pos)
228 zeropos = data.find('\x00', pos)
229 assert 0 <= needle <= len(self.positions)
229 assert 0 <= needle <= len(self.positions)
230 assert len(self.extrainfo) == len(self.positions)
230 assert len(self.extrainfo) == len(self.positions)
231 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
231 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
232 flags = self._getflags(data, needle, zeropos)
232 flags = self._getflags(data, needle, zeropos)
233 return (hashval, flags)
233 return (hashval, flags)
234
234
235 def __delitem__(self, key):
235 def __delitem__(self, key):
236 needle, found = self.bsearch2(key)
236 needle, found = self.bsearch2(key)
237 if not found:
237 if not found:
238 raise KeyError
238 raise KeyError
239 cur = self.positions[needle]
239 cur = self.positions[needle]
240 self.positions = self.positions[:needle] + self.positions[needle + 1:]
240 self.positions = self.positions[:needle] + self.positions[needle + 1:]
241 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
241 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
242 if cur >= 0:
242 if cur >= 0:
243 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
243 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
244
244
245 def __setitem__(self, key, value):
245 def __setitem__(self, key, value):
246 if not isinstance(key, bytes):
246 if not isinstance(key, bytes):
247 raise TypeError("setitem: manifest keys must be a byte string.")
247 raise TypeError("setitem: manifest keys must be a byte string.")
248 if not isinstance(value, tuple) or len(value) != 2:
248 if not isinstance(value, tuple) or len(value) != 2:
249 raise TypeError("Manifest values must be a tuple of (node, flags).")
249 raise TypeError("Manifest values must be a tuple of (node, flags).")
250 hashval = value[0]
250 hashval = value[0]
251 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
251 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
252 raise TypeError("node must be a 20-byte byte string")
252 raise TypeError("node must be a 20-byte byte string")
253 flags = value[1]
253 flags = value[1]
254 if len(hashval) == 22:
254 if len(hashval) == 22:
255 hashval = hashval[:-1]
255 hashval = hashval[:-1]
256 if not isinstance(flags, bytes) or len(flags) > 1:
256 if not isinstance(flags, bytes) or len(flags) > 1:
257 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
257 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
258 needle, found = self.bsearch2(key)
258 needle, found = self.bsearch2(key)
259 if found:
259 if found:
260 # put the item
260 # put the item
261 pos = self.positions[needle]
261 pos = self.positions[needle]
262 if pos < 0:
262 if pos < 0:
263 self.extradata[-pos - 1] = (key, hashval, value[1])
263 self.extradata[-pos - 1] = (key, hashval, value[1])
264 else:
264 else:
265 # just don't bother
265 # just don't bother
266 self.extradata.append((key, hashval, value[1]))
266 self.extradata.append((key, hashval, value[1]))
267 self.positions[needle] = -len(self.extradata)
267 self.positions[needle] = -len(self.extradata)
268 else:
268 else:
269 # not found, put it in with extra positions
269 # not found, put it in with extra positions
270 self.extradata.append((key, hashval, value[1]))
270 self.extradata.append((key, hashval, value[1]))
271 self.positions = (self.positions[:needle] + [-len(self.extradata)]
271 self.positions = (self.positions[:needle] + [-len(self.extradata)]
272 + self.positions[needle:])
272 + self.positions[needle:])
273 self.extrainfo = (self.extrainfo[:needle] + [0] +
273 self.extrainfo = (self.extrainfo[:needle] + [0] +
274 self.extrainfo[needle:])
274 self.extrainfo[needle:])
275
275
276 def copy(self):
276 def copy(self):
277 # XXX call _compact like in C?
277 # XXX call _compact like in C?
278 return _lazymanifest(self.data, self.positions, self.extrainfo,
278 return _lazymanifest(self.data, self.positions, self.extrainfo,
279 self.extradata)
279 self.extradata)
280
280
281 def _compact(self):
281 def _compact(self):
282 # hopefully not called TOO often
282 # hopefully not called TOO often
283 if len(self.extradata) == 0:
283 if len(self.extradata) == 0:
284 return
284 return
285 l = []
285 l = []
286 i = 0
286 i = 0
287 offset = 0
287 offset = 0
288 self.extrainfo = [0] * len(self.positions)
288 self.extrainfo = [0] * len(self.positions)
289 while i < len(self.positions):
289 while i < len(self.positions):
290 if self.positions[i] >= 0:
290 if self.positions[i] >= 0:
291 cur = self.positions[i]
291 cur = self.positions[i]
292 last_cut = cur
292 last_cut = cur
293 while True:
293 while True:
294 self.positions[i] = offset
294 self.positions[i] = offset
295 i += 1
295 i += 1
296 if i == len(self.positions) or self.positions[i] < 0:
296 if i == len(self.positions) or self.positions[i] < 0:
297 break
297 break
298 offset += self.positions[i] - cur
298 offset += self.positions[i] - cur
299 cur = self.positions[i]
299 cur = self.positions[i]
300 end_cut = self.data.find('\n', cur)
300 end_cut = self.data.find('\n', cur)
301 if end_cut != -1:
301 if end_cut != -1:
302 end_cut += 1
302 end_cut += 1
303 offset += end_cut - cur
303 offset += end_cut - cur
304 l.append(self.data[last_cut:end_cut])
304 l.append(self.data[last_cut:end_cut])
305 else:
305 else:
306 while i < len(self.positions) and self.positions[i] < 0:
306 while i < len(self.positions) and self.positions[i] < 0:
307 cur = self.positions[i]
307 cur = self.positions[i]
308 t = self.extradata[-cur - 1]
308 t = self.extradata[-cur - 1]
309 l.append(self._pack(t))
309 l.append(self._pack(t))
310 self.positions[i] = offset
310 self.positions[i] = offset
311 if len(t[1]) > 20:
311 if len(t[1]) > 20:
312 self.extrainfo[i] = ord(t[1][21])
312 self.extrainfo[i] = ord(t[1][21])
313 offset += len(l[-1])
313 offset += len(l[-1])
314 i += 1
314 i += 1
315 self.data = ''.join(l)
315 self.data = ''.join(l)
316 self.extradata = []
316 self.extradata = []
317
317
318 def _pack(self, d):
318 def _pack(self, d):
319 return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
319 return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
320
320
321 def text(self):
321 def text(self):
322 self._compact()
322 self._compact()
323 return self.data
323 return self.data
324
324
325 def diff(self, m2, clean=False):
325 def diff(self, m2, clean=False):
326 '''Finds changes between the current manifest and m2.'''
326 '''Finds changes between the current manifest and m2.'''
327 # XXX think whether efficiency matters here
327 # XXX think whether efficiency matters here
328 diff = {}
328 diff = {}
329
329
330 for fn, e1, flags in self.iterentries():
330 for fn, e1, flags in self.iterentries():
331 if fn not in m2:
331 if fn not in m2:
332 diff[fn] = (e1, flags), (None, '')
332 diff[fn] = (e1, flags), (None, '')
333 else:
333 else:
334 e2 = m2[fn]
334 e2 = m2[fn]
335 if (e1, flags) != e2:
335 if (e1, flags) != e2:
336 diff[fn] = (e1, flags), e2
336 diff[fn] = (e1, flags), e2
337 elif clean:
337 elif clean:
338 diff[fn] = None
338 diff[fn] = None
339
339
340 for fn, e2, flags in m2.iterentries():
340 for fn, e2, flags in m2.iterentries():
341 if fn not in self:
341 if fn not in self:
342 diff[fn] = (None, ''), (e2, flags)
342 diff[fn] = (None, ''), (e2, flags)
343
343
344 return diff
344 return diff
345
345
346 def iterentries(self):
346 def iterentries(self):
347 return lazymanifestiterentries(self)
347 return lazymanifestiterentries(self)
348
348
349 def iterkeys(self):
349 def iterkeys(self):
350 return lazymanifestiter(self)
350 return lazymanifestiter(self)
351
351
352 def __iter__(self):
352 def __iter__(self):
353 return lazymanifestiter(self)
353 return lazymanifestiter(self)
354
354
355 def __len__(self):
355 def __len__(self):
356 return len(self.positions)
356 return len(self.positions)
357
357
358 def filtercopy(self, filterfn):
358 def filtercopy(self, filterfn):
359 # XXX should be optimized
359 # XXX should be optimized
360 c = _lazymanifest('')
360 c = _lazymanifest('')
361 for f, n, fl in self.iterentries():
361 for f, n, fl in self.iterentries():
362 if filterfn(f):
362 if filterfn(f):
363 c[f] = n, fl
363 c[f] = n, fl
364 return c
364 return c
365
365
366 try:
366 try:
367 _lazymanifest = parsers.lazymanifest
367 _lazymanifest = parsers.lazymanifest
368 except AttributeError:
368 except AttributeError:
369 pass
369 pass
370
370
371 @interfaceutil.implementer(repository.imanifestdict)
371 @interfaceutil.implementer(repository.imanifestdict)
372 class manifestdict(object):
372 class manifestdict(object):
373 def __init__(self, data=''):
373 def __init__(self, data=''):
374 self._lm = _lazymanifest(data)
374 self._lm = _lazymanifest(data)
375
375
376 def __getitem__(self, key):
376 def __getitem__(self, key):
377 return self._lm[key][0]
377 return self._lm[key][0]
378
378
379 def find(self, key):
379 def find(self, key):
380 return self._lm[key]
380 return self._lm[key]
381
381
382 def __len__(self):
382 def __len__(self):
383 return len(self._lm)
383 return len(self._lm)
384
384
385 def __nonzero__(self):
385 def __nonzero__(self):
386 # nonzero is covered by the __len__ function, but implementing it here
386 # nonzero is covered by the __len__ function, but implementing it here
387 # makes it easier for extensions to override.
387 # makes it easier for extensions to override.
388 return len(self._lm) != 0
388 return len(self._lm) != 0
389
389
390 __bool__ = __nonzero__
390 __bool__ = __nonzero__
391
391
392 def __setitem__(self, key, node):
392 def __setitem__(self, key, node):
393 self._lm[key] = node, self.flags(key, '')
393 self._lm[key] = node, self.flags(key, '')
394
394
395 def __contains__(self, key):
395 def __contains__(self, key):
396 if key is None:
396 if key is None:
397 return False
397 return False
398 return key in self._lm
398 return key in self._lm
399
399
400 def __delitem__(self, key):
400 def __delitem__(self, key):
401 del self._lm[key]
401 del self._lm[key]
402
402
403 def __iter__(self):
403 def __iter__(self):
404 return self._lm.__iter__()
404 return self._lm.__iter__()
405
405
406 def iterkeys(self):
406 def iterkeys(self):
407 return self._lm.iterkeys()
407 return self._lm.iterkeys()
408
408
409 def keys(self):
409 def keys(self):
410 return list(self.iterkeys())
410 return list(self.iterkeys())
411
411
412 def filesnotin(self, m2, match=None):
412 def filesnotin(self, m2, match=None):
413 '''Set of files in this manifest that are not in the other'''
413 '''Set of files in this manifest that are not in the other'''
414 if match:
414 if match:
415 m1 = self.matches(match)
415 m1 = self.matches(match)
416 m2 = m2.matches(match)
416 m2 = m2.matches(match)
417 return m1.filesnotin(m2)
417 return m1.filesnotin(m2)
418 diff = self.diff(m2)
418 diff = self.diff(m2)
419 files = set(filepath
419 files = set(filepath
420 for filepath, hashflags in diff.iteritems()
420 for filepath, hashflags in diff.iteritems()
421 if hashflags[1][0] is None)
421 if hashflags[1][0] is None)
422 return files
422 return files
423
423
424 @propertycache
424 @propertycache
425 def _dirs(self):
425 def _dirs(self):
426 return util.dirs(self)
426 return util.dirs(self)
427
427
428 def dirs(self):
428 def dirs(self):
429 return self._dirs
429 return self._dirs
430
430
431 def hasdir(self, dir):
431 def hasdir(self, dir):
432 return dir in self._dirs
432 return dir in self._dirs
433
433
434 def _filesfastpath(self, match):
434 def _filesfastpath(self, match):
435 '''Checks whether we can correctly and quickly iterate over matcher
435 '''Checks whether we can correctly and quickly iterate over matcher
436 files instead of over manifest files.'''
436 files instead of over manifest files.'''
437 files = match.files()
437 files = match.files()
438 return (len(files) < 100 and (match.isexact() or
438 return (len(files) < 100 and (match.isexact() or
439 (match.prefix() and all(fn in self for fn in files))))
439 (match.prefix() and all(fn in self for fn in files))))
440
440
441 def walk(self, match):
441 def walk(self, match):
442 '''Generates matching file names.
442 '''Generates matching file names.
443
443
444 Equivalent to manifest.matches(match).iterkeys(), but without creating
444 Equivalent to manifest.matches(match).iterkeys(), but without creating
445 an entirely new manifest.
445 an entirely new manifest.
446
446
447 It also reports nonexistent files by marking them bad with match.bad().
447 It also reports nonexistent files by marking them bad with match.bad().
448 '''
448 '''
449 if match.always():
449 if match.always():
450 for f in iter(self):
450 for f in iter(self):
451 yield f
451 yield f
452 return
452 return
453
453
454 fset = set(match.files())
454 fset = set(match.files())
455
455
456 # avoid the entire walk if we're only looking for specific files
456 # avoid the entire walk if we're only looking for specific files
457 if self._filesfastpath(match):
457 if self._filesfastpath(match):
458 for fn in sorted(fset):
458 for fn in sorted(fset):
459 yield fn
459 yield fn
460 return
460 return
461
461
462 for fn in self:
462 for fn in self:
463 if fn in fset:
463 if fn in fset:
464 # specified pattern is the exact name
464 # specified pattern is the exact name
465 fset.remove(fn)
465 fset.remove(fn)
466 if match(fn):
466 if match(fn):
467 yield fn
467 yield fn
468
468
469 # for dirstate.walk, files=['.'] means "walk the whole tree".
469 # for dirstate.walk, files=['.'] means "walk the whole tree".
470 # follow that here, too
470 # follow that here, too
471 fset.discard('.')
471 fset.discard('.')
472
472
473 for fn in sorted(fset):
473 for fn in sorted(fset):
474 if not self.hasdir(fn):
474 if not self.hasdir(fn):
475 match.bad(fn, None)
475 match.bad(fn, None)
476
476
477 def matches(self, match):
477 def matches(self, match):
478 '''generate a new manifest filtered by the match argument'''
478 '''generate a new manifest filtered by the match argument'''
479 if match.always():
479 if match.always():
480 return self.copy()
480 return self.copy()
481
481
482 if self._filesfastpath(match):
482 if self._filesfastpath(match):
483 m = manifestdict()
483 m = manifestdict()
484 lm = self._lm
484 lm = self._lm
485 for fn in match.files():
485 for fn in match.files():
486 if fn in lm:
486 if fn in lm:
487 m._lm[fn] = lm[fn]
487 m._lm[fn] = lm[fn]
488 return m
488 return m
489
489
490 m = manifestdict()
490 m = manifestdict()
491 m._lm = self._lm.filtercopy(match)
491 m._lm = self._lm.filtercopy(match)
492 return m
492 return m
493
493
def diff(self, m2, match=None, clean=False):
    '''Finds changes between the current manifest and m2.

    Args:
      m2: the manifest to which this manifest should be compared.
      match: optional matcher; when given, both manifests are first
        restricted to matching files before being compared.
      clean: if true, include files unchanged between these manifests
        with a None value in the returned dictionary.

    The result is returned as a dict with filename as key and
    values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
    nodeid in the current/other manifest and fl1/fl2 is the flag
    in the current/other manifest. Where the file does not exist,
    the nodeid will be None and the flags will be the empty
    string.
    '''
    if match:
        # Narrow both sides to the match, then diff the narrowed pair.
        return self.matches(match).diff(m2.matches(match), clean=clean)
    return self._lm.diff(m2._lm, clean)
514
514
def setflag(self, key, flag):
    """Attach `flag` to `key`, keeping its existing nodeid.

    Raises KeyError (via self[key]) when `key` is not present.
    """
    node = self[key]
    self._lm[key] = node, flag
517
517
def get(self, key, default=None):
    """Return the nodeid stored for `key`, or `default` when absent."""
    try:
        entry = self._lm[key]
    except KeyError:
        return default
    return entry[0]
523
523
def flags(self, key, default=''):
    """Return the flags stored for `key`, or `default` when absent."""
    try:
        entry = self._lm[key]
    except KeyError:
        return default
    return entry[1]
529
529
def copy(self):
    """Return an independent copy of this manifest."""
    duplicate = manifestdict()
    duplicate._lm = self._lm.copy()
    return duplicate
534
534
def items(self):
    # Expose (path, node) pairs, dropping the flags column of each entry.
    for entry in self._lm.iterentries():
        yield entry[:2]
537
537
def iteritems(self):
    # Same contract as items(): (path, node) pairs without flags.
    for entry in self._lm.iterentries():
        yield entry[:2]
540
540
def iterentries(self):
    """Yield (path, node, flags) triples from the backing lazymanifest."""
    return self._lm.iterentries()
543
543
def text(self):
    # most likely uses native version
    """Serialize this manifest; delegates to the backing lazymanifest."""
    return self._lm.text()
547
547
def fastdelta(self, base, changes):
    """Given a base manifest text as a bytearray and a list of changes
    relative to that text, compute a delta that can be used by revlog.

    `changes` holds (filename, todelete) pairs; the line search resumes
    at the previous match, so it assumes `changes` is sorted by
    filename -- TODO confirm with callers.

    Returns (arraytext, deltatext): the new full text and the delta.
    """
    delta = []
    dstart = None
    dend = None
    dline = [""]
    start = 0
    # zero copy representation of base as a buffer
    addbuf = util.buffer(base)

    changes = list(changes)
    if len(changes) < 1000:
        # start with a readonly loop that finds the offset of
        # each line and creates the deltas
        for f, todelete in changes:
            # bs will either be the index of the item or the insert point
            start, end = _msearch(addbuf, f, start)
            if not todelete:
                h, fl = self._lm[f]
                l = "%s\0%s%s\n" % (f, hex(h), fl)
            else:
                if start == end:
                    # item we want to delete was not found, error out
                    raise AssertionError(
                        _("failed to remove %s from manifest") % f)
                l = ""
            # Merge this edit into the current delta chunk when the
            # regions touch/overlap; otherwise flush and start a new one.
            if dstart is not None and dstart <= start and dend >= start:
                if dend < end:
                    dend = end
                if l:
                    dline.append(l)
            else:
                if dstart is not None:
                    delta.append([dstart, dend, "".join(dline)])
                dstart = start
                dend = end
                dline = [l]

        if dstart is not None:
            delta.append([dstart, dend, "".join(dline)])
        # apply the delta to the base, and get a delta for addrevision
        deltatext, arraytext = _addlistdelta(base, delta)
    else:
        # For large changes, it's much cheaper to just build the text and
        # diff it.
        arraytext = bytearray(self.text())
        deltatext = mdiff.textdiff(
            util.buffer(base), util.buffer(arraytext))

    return arraytext, deltatext
600
600
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found m[start:end] are the line containing
    that string.  If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer, a memoryview or a byte string.
    s is a byte string'''
    def advance(i, c):
        # Move i forward to the next occurrence of byte c (or to lenm).
        while i < lenm and m[i:i + 1] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    # Binary search over manifest *lines*: each probe is backed up to
    # the start of its line before comparing filenames.
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        while start > 0 and m[start - 1:start] != '\n':
            start -= 1
        end = advance(start, '\0')
        if bytes(m[start:end]) < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)
640
640
def _checkforbidden(l):
    """Check filenames for illegal characters.

    Raises StorageError on the first name containing a newline or
    carriage return, which would corrupt the line-based manifest format.
    """
    for f in l:
        if '\n' in f or '\r' in f:
            raise error.StorageError(
                _("'\\n' and '\\r' disallowed in filenames: %r")
                % pycompat.bytestr(f))
648
648
649
649
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    """Apply patch chunks `x` ([start, end, content] triples, in
    ascending order) to `addlist`; return (deltatext, newaddlist).

    deltatext is the concatenated revlog-style patch: for each chunk a
    big-endian (start, end, len(content)) header followed by content.
    """
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = bytearray()

    for start, end, content in x:
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += bytearray(content)

        currentposition = end

    newaddlist += addlist[currentposition:]

    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, newaddlist
670
670
671 def _splittopdir(f):
671 def _splittopdir(f):
672 if '/' in f:
672 if '/' in f:
673 dir, subpath = f.split('/', 1)
673 dir, subpath = f.split('/', 1)
674 return dir + '/', subpath
674 return dir + '/', subpath
675 else:
675 else:
676 return '', f
676 return '', f
677
677
678 _noop = lambda s: None
678 _noop = lambda s: None
679
679
680 class treemanifest(object):
680 class treemanifest(object):
def __init__(self, dir='', text=''):
    """Create a (sub)tree manifest for directory `dir` ('' = repo root).

    When `text` is given it must be a *flat* manifest body; it is parsed
    directly into this node and subtree references are rejected.
    """
    self._dir = dir          # directory prefix, including trailing '/'
    self._node = nullid      # revlog node; nullid while unsaved
    self._loadfunc = _noop   # deferred full-load callback
    self._copyfunc = _noop   # deferred copy-population callback
    self._dirty = False
    self._dirs = {}          # loaded subdir name ('x/') -> treemanifest
    # pending subdir -> (path, node, readsubtree, docopy)
    self._lazydirs = {}
    # Using _lazymanifest here is a little slower than plain old dicts
    self._files = {}
    self._flags = {}
    if text:
        def readsubtree(subdir, subm):
            raise AssertionError('treemanifest constructor only accepts '
                                 'flat manifests')
        self.parse(text, readsubtree)
        self._dirty = True # Mark flat manifest dirty after parsing
698
698
699 def _subpath(self, path):
699 def _subpath(self, path):
700 return self._dir + path
700 return self._dir + path
701
701
def _loadalllazy(self):
    """Materialize every pending lazy subdirectory into self._dirs."""
    selfdirs = self._dirs
    for d, (path, node, readsubtree, docopy) in self._lazydirs.iteritems():
        # docopy means this entry must not share state with its source.
        if docopy:
            selfdirs[d] = readsubtree(path, node).copy()
        else:
            selfdirs[d] = readsubtree(path, node)
    self._lazydirs = {}
710
710
711 def _loadlazy(self, d):
711 def _loadlazy(self, d):
712 v = self._lazydirs.get(d)
712 v = self._lazydirs.get(d)
713 if v:
713 if v:
714 path, node, readsubtree, docopy = v
714 path, node, readsubtree, docopy = v
715 if docopy:
715 if docopy:
716 self._dirs[d] = readsubtree(path, node).copy()
716 self._dirs[d] = readsubtree(path, node).copy()
717 else:
717 else:
718 self._dirs[d] = readsubtree(path, node)
718 self._dirs[d] = readsubtree(path, node)
719 del self._lazydirs[d]
719 del self._lazydirs[d]
720
720
721 def _loadchildrensetlazy(self, visit):
721 def _loadchildrensetlazy(self, visit):
722 if not visit:
722 if not visit:
723 return None
723 return None
724 if visit == 'all' or visit == 'this':
724 if visit == 'all' or visit == 'this':
725 self._loadalllazy()
725 self._loadalllazy()
726 return None
726 return None
727
727
728 loadlazy = self._loadlazy
728 loadlazy = self._loadlazy
729 for k in visit:
729 for k in visit:
730 loadlazy(k + '/')
730 loadlazy(k + '/')
731 return visit
731 return visit
732
732
def _loaddifflazy(self, t1, t2):
    """load items in t1 and t2 if they're needed for diffing.

    The criteria currently is:
    - if it's not present in _lazydirs in either t1 or t2, load it in the
      other (it may already be loaded or it may not exist, doesn't matter)
    - if it's present in _lazydirs in both, compare the nodeid; if it
      differs, load it in both
    """
    toloadlazy = []
    for d, v1 in t1._lazydirs.iteritems():
        v2 = t2._lazydirs.get(d)
        # v[1] is the stored nodeid; missing or differing means we
        # cannot prove the subtrees equal, so both must be loaded.
        if not v2 or v2[1] != v1[1]:
            toloadlazy.append(d)
    for d, v1 in t2._lazydirs.iteritems():
        if d not in t1._lazydirs:
            toloadlazy.append(d)

    for d in toloadlazy:
        t1._loadlazy(d)
        t2._loadlazy(d)
754
754
def __len__(self):
    """Total number of files in this tree, including all subtrees."""
    self._load()
    size = len(self._files)
    self._loadalllazy()
    for m in self._dirs.values():
        size += m.__len__()
    return size
762
762
def __nonzero__(self):
    # Faster than "__len__() != 0" since it avoids loading sub-manifests
    return not self._isempty()

# Python 3 truthiness hook; same implementation.
__bool__ = __nonzero__
768
768
def _isempty(self):
    """True if this tree contains no files at any depth."""
    self._load() # for consistency; already loaded by all callers
    # See if we can skip loading everything.
    if self._files or (self._dirs and
                       any(not m._isempty() for m in self._dirs.values())):
        return False
    # Only now pay the cost of loading lazy subtrees to be sure.
    self._loadalllazy()
    return (not self._dirs or
            all(m._isempty() for m in self._dirs.values()))
778
778
def __repr__(self):
    # NOTE: 'loaded' reports whether the deferred load has already run
    # (_loadfunc is the _noop sentinel), not whether lazy subdirs are in.
    return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
            (self._dir, hex(self._node),
             bool(self._loadfunc is _noop),
             self._dirty, id(self)))
784
784
def dir(self):
    """Return the directory this tree manifest represents, including a
    trailing '/'; the empty string for the repo root directory."""
    return self._dir
789
789
def node(self):
    """Return the revlog node this tree was read from or written as.

    nullid for never-saved instances; only valid on a clean tree --
    a dirty tree has no meaningful node yet (hence the assert).
    """
    assert not self._dirty
    return self._node
796
796
def setnode(self, node):
    """Record `node` as this tree's revision and mark it clean."""
    self._dirty = False
    self._node = node
800
800
def iterentries(self):
    """Yield (path, node, flags) for every file, in sorted path order."""
    self._load()
    self._loadalllazy()
    for p, n in sorted(itertools.chain(self._dirs.items(),
                                       self._files.items())):
        if p in self._files:
            yield self._subpath(p), n, self._flags.get(p, '')
        else:
            # n is a sub-treemanifest here; recurse into it.
            for x in n.iterentries():
                yield x
811
811
def items(self):
    """Yield (path, node) for every file, in sorted path order."""
    self._load()
    self._loadalllazy()
    for p, n in sorted(itertools.chain(self._dirs.items(),
                                       self._files.items())):
        if p in self._files:
            yield self._subpath(p), n
        else:
            # n is a sub-treemanifest here; flatten it.
            for f, sn in n.iteritems():
                yield f, sn

iteritems = items
824
824
def iterkeys(self):
    """Yield every full file path in this tree, in sorted order."""
    self._load()
    self._loadalllazy()
    for name in sorted(itertools.chain(self._dirs, self._files)):
        if name in self._files:
            yield self._subpath(name)
        else:
            # Recurse into the subdirectory manifest.
            for sub in self._dirs[name]:
                yield sub
834
834
def keys(self):
    """Return all file paths as a list (sorted, per iterkeys)."""
    return [fn for fn in self.iterkeys()]
837
837
def __iter__(self):
    # Iterating a treemanifest walks its sorted file paths.
    return self.iterkeys()
840
840
def __contains__(self, f):
    """True if file path `f` exists in this tree (None is never present)."""
    if f is None:
        return False
    self._load()
    dir, subpath = _splittopdir(f)
    if dir:
        self._loadlazy(dir)

        if dir not in self._dirs:
            return False

        # Delegate the remainder of the path to the subtree.
        return self._dirs[dir].__contains__(subpath)
    else:
        return f in self._files
855
855
def get(self, f, default=None):
    """Return the node for file `f`, or `default` when it is absent."""
    self._load()
    dir, subpath = _splittopdir(f)
    if not dir:
        return self._files.get(f, default)
    self._loadlazy(dir)
    submanifest = self._dirs.get(dir)
    if submanifest is None:
        return default
    return submanifest.get(subpath, default)
867
867
def __getitem__(self, f):
    """Return the node for file `f`; raises KeyError when absent."""
    self._load()
    dir, subpath = _splittopdir(f)
    if dir:
        self._loadlazy(dir)

        # Delegate the remainder of the path to the subtree.
        return self._dirs[dir].__getitem__(subpath)
    else:
        return self._files[f]
877
877
def flags(self, f):
    """Return the flags for file `f`; '' for directories and unknowns."""
    self._load()
    dir, subpath = _splittopdir(f)
    if not dir:
        # A directory name never carries flags of its own.
        if f in self._lazydirs or f in self._dirs:
            return ''
        return self._flags.get(f, '')
    self._loadlazy(dir)
    if dir not in self._dirs:
        return ''
    return self._dirs[dir].flags(subpath)
891
891
def find(self, f):
    """Return (node, flags) for file `f`; raises KeyError when absent."""
    self._load()
    dir, subpath = _splittopdir(f)
    if not dir:
        return self._files[f], self._flags.get(f, '')
    self._loadlazy(dir)
    return self._dirs[dir].find(subpath)
901
901
def __delitem__(self, f):
    """Remove file `f`; prunes any subdirectory this leaves empty."""
    self._load()
    dir, subpath = _splittopdir(f)
    if dir:
        self._loadlazy(dir)

        self._dirs[dir].__delitem__(subpath)
        # If the directory is now empty, remove it
        if self._dirs[dir]._isempty():
            del self._dirs[dir]
    else:
        del self._files[f]
        if f in self._flags:
            del self._flags[f]
    self._dirty = True
917
917
def __setitem__(self, f, n):
    """Store node `n` for file `f`, creating intermediate subtrees."""
    assert n is not None
    self._load()
    dir, subpath = _splittopdir(f)
    if dir:
        self._loadlazy(dir)
        if dir not in self._dirs:
            self._dirs[dir] = treemanifest(self._subpath(dir))
        self._dirs[dir].__setitem__(subpath, n)
    else:
        self._files[f] = n[:21] # to match manifestdict's behavior
    self._dirty = True
930
930
def _load(self):
    """Run the deferred load (or copy) callback at most once."""
    if self._loadfunc is not _noop:
        # Swap in the sentinel *before* calling, so a reentrant
        # _load() during the callback is a no-op.
        lf, self._loadfunc = self._loadfunc, _noop
        lf(self)
    elif self._copyfunc is not _noop:
        cf, self._copyfunc = self._copyfunc, _noop
        cf(self)
938
938
def setflag(self, f, flags):
    """Set the flags (symlink, executable) for path f."""
    self._load()
    dir, subpath = _splittopdir(f)
    if not dir:
        self._flags[f] = flags
    else:
        self._loadlazy(dir)
        if dir not in self._dirs:
            self._dirs[dir] = treemanifest(self._subpath(dir))
        self._dirs[dir].setflag(subpath, flags)
    self._dirty = True
951
951
def copy(self):
    """Return a (possibly lazily-populated) copy of this tree."""
    copy = treemanifest(self._dir)
    copy._node = self._node
    copy._dirty = self._dirty
    if self._copyfunc is _noop:
        def _copyfunc(s):
            # Populate `s` from self; lazy entries are re-registered
            # with docopy=True so the trees never share mutable state.
            self._load()
            s._lazydirs = {d: (p, n, r, True) for
                           d, (p, n, r, c) in self._lazydirs.iteritems()}
            sdirs = s._dirs
            for d, v in self._dirs.iteritems():
                sdirs[d] = v.copy()
            s._files = dict.copy(self._files)
            s._flags = dict.copy(self._flags)
        if self._loadfunc is _noop:
            # Already loaded: populate the copy eagerly.
            _copyfunc(copy)
        else:
            # Not loaded yet: defer population until first use.
            copy._copyfunc = _copyfunc
    else:
        copy._copyfunc = self._copyfunc
    return copy
973
973
def filesnotin(self, m2, match=None):
    '''Set of files in this manifest that are not in the other'''
    if match and not match.always():
        m1 = self.matches(match)
        m2 = m2.matches(match)
        return m1.filesnotin(m2)

    files = set()
    def _filesnotin(t1, t2):
        # Identical clean nodes imply identical subtrees; skip them.
        if t1._node == t2._node and not t1._dirty and not t2._dirty:
            return
        t1._load()
        t2._load()
        self._loaddifflazy(t1, t2)
        for d, m1 in t1._dirs.iteritems():
            if d in t2._dirs:
                m2 = t2._dirs[d]
                _filesnotin(m1, m2)
            else:
                # Whole directory absent from t2: all its files count.
                files.update(m1.iterkeys())

        for fn in t1._files:
            if fn not in t2._files:
                files.add(t1._subpath(fn))

    _filesnotin(self, m2)
    return files
1001
1001
@propertycache
def _alldirs(self):
    # Lazily computed, cached util.dirs over this manifest's paths.
    return util.dirs(self)
1005
1005
def dirs(self):
    """Return the cached directory-set helper (see _alldirs in SOURCE)."""
    return self._alldirs
1008
1008
def hasdir(self, dir):
    """True if `dir` exists as a directory anywhere in this tree."""
    self._load()
    topdir, subdir = _splittopdir(dir)
    if not topdir:
        # Leaf lookup: directory keys carry a trailing slash.
        dirslash = dir + '/'
        return dirslash in self._dirs or dirslash in self._lazydirs
    self._loadlazy(topdir)
    if topdir in self._dirs:
        return self._dirs[topdir].hasdir(subdir)
    return False
1019
1019
def walk(self, match):
    '''Generates matching file names.

    Equivalent to manifest.matches(match).iterkeys(), but without creating
    an entirely new manifest.

    It also reports nonexistent files by marking them bad with match.bad().
    '''
    if match.always():
        for f in iter(self):
            yield f
        return

    fset = set(match.files())

    for fn in self._walk(match):
        if fn in fset:
            # specified pattern is the exact name
            fset.remove(fn)
        yield fn

    # for dirstate.walk, files=['.'] means "walk the whole tree".
    # follow that here, too
    fset.discard('.')

    # Anything left in fset was named by the matcher but never seen;
    # report it unless it names a real directory.
    for fn in sorted(fset):
        if not self.hasdir(fn):
            match.bad(fn, None)
1048
1048
def _walk(self, match):
    '''Recursively generates matching file names for walk().'''
    visit = match.visitchildrenset(self._dir[:-1] or '.')
    if not visit:
        return

    # yield this dir's files and walk its submanifests
    self._load()
    visit = self._loadchildrensetlazy(visit)
    for p in sorted(list(self._dirs) + list(self._files)):
        if p in self._files:
            fullp = self._subpath(p)
            if match(fullp):
                yield fullp
        else:
            # p ends with '/' but visitchildrenset names do not.
            if not visit or p[:-1] in visit:
                for f in self._dirs[p]._walk(match):
                    yield f
1067
1067
def matches(self, match):
    '''generate a new manifest filtered by the match argument'''
    if not match.always():
        return self._matches(match)
    # Matching everything: a plain copy is cheaper than filtering.
    return self.copy()
1074
1074
def _matches(self, match):
    '''recursively generate a new manifest filtered by the match argument.
    '''

    visit = match.visitchildrenset(self._dir[:-1] or '.')
    if visit == 'all':
        return self.copy()
    ret = treemanifest(self._dir)
    if not visit:
        return ret

    self._load()
    for fn in self._files:
        # While visitchildrenset *usually* lists only subdirs, this is
        # actually up to the matcher and may have some files in the set().
        # If visit == 'this', we should obviously look at the files in this
        # directory; if visit is a set, and fn is in it, we should inspect
        # fn (but no need to inspect things not in the set).
        if visit != 'this' and fn not in visit:
            continue
        fullp = self._subpath(fn)
        # visitchildrenset isn't perfect, we still need to call the regular
        # matcher code to further filter results.
        if not match(fullp):
            continue
        ret._files[fn] = self._files[fn]
        if fn in self._flags:
            ret._flags[fn] = self._flags[fn]

    visit = self._loadchildrensetlazy(visit)
    for dir, subm in self._dirs.iteritems():
        if visit and dir[:-1] not in visit:
            continue
        m = subm._matches(match)
        # Only keep non-empty filtered subtrees.
        if not m._isempty():
            ret._dirs[dir] = m

    if not ret._isempty():
        ret._dirty = True
    return ret
1115
1115
1116 def diff(self, m2, match=None, clean=False):
1116 def diff(self, m2, match=None, clean=False):
1117 '''Finds changes between the current manifest and m2.
1117 '''Finds changes between the current manifest and m2.
1118
1118
1119 Args:
1119 Args:
1120 m2: the manifest to which this manifest should be compared.
1120 m2: the manifest to which this manifest should be compared.
1121 clean: if true, include files unchanged between these manifests
1121 clean: if true, include files unchanged between these manifests
1122 with a None value in the returned dictionary.
1122 with a None value in the returned dictionary.
1123
1123
1124 The result is returned as a dict with filename as key and
1124 The result is returned as a dict with filename as key and
1125 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
1125 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
1126 nodeid in the current/other manifest and fl1/fl2 is the flag
1126 nodeid in the current/other manifest and fl1/fl2 is the flag
1127 in the current/other manifest. Where the file does not exist,
1127 in the current/other manifest. Where the file does not exist,
1128 the nodeid will be None and the flags will be the empty
1128 the nodeid will be None and the flags will be the empty
1129 string.
1129 string.
1130 '''
1130 '''
1131 if match and not match.always():
1131 if match and not match.always():
1132 m1 = self.matches(match)
1132 m1 = self.matches(match)
1133 m2 = m2.matches(match)
1133 m2 = m2.matches(match)
1134 return m1.diff(m2, clean=clean)
1134 return m1.diff(m2, clean=clean)
1135 result = {}
1135 result = {}
1136 emptytree = treemanifest()
1136 emptytree = treemanifest()
1137
1137
1138 def _iterativediff(t1, t2, stack):
1138 def _iterativediff(t1, t2, stack):
1139 """compares two tree manifests and append new tree-manifests which
1139 """compares two tree manifests and append new tree-manifests which
1140 needs to be compared to stack"""
1140 needs to be compared to stack"""
1141 if t1._node == t2._node and not t1._dirty and not t2._dirty:
1141 if t1._node == t2._node and not t1._dirty and not t2._dirty:
1142 return
1142 return
1143 t1._load()
1143 t1._load()
1144 t2._load()
1144 t2._load()
1145 self._loaddifflazy(t1, t2)
1145 self._loaddifflazy(t1, t2)
1146
1146
1147 for d, m1 in t1._dirs.iteritems():
1147 for d, m1 in t1._dirs.iteritems():
1148 m2 = t2._dirs.get(d, emptytree)
1148 m2 = t2._dirs.get(d, emptytree)
1149 stack.append((m1, m2))
1149 stack.append((m1, m2))
1150
1150
1151 for d, m2 in t2._dirs.iteritems():
1151 for d, m2 in t2._dirs.iteritems():
1152 if d not in t1._dirs:
1152 if d not in t1._dirs:
1153 stack.append((emptytree, m2))
1153 stack.append((emptytree, m2))
1154
1154
1155 for fn, n1 in t1._files.iteritems():
1155 for fn, n1 in t1._files.iteritems():
1156 fl1 = t1._flags.get(fn, '')
1156 fl1 = t1._flags.get(fn, '')
1157 n2 = t2._files.get(fn, None)
1157 n2 = t2._files.get(fn, None)
1158 fl2 = t2._flags.get(fn, '')
1158 fl2 = t2._flags.get(fn, '')
1159 if n1 != n2 or fl1 != fl2:
1159 if n1 != n2 or fl1 != fl2:
1160 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
1160 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
1161 elif clean:
1161 elif clean:
1162 result[t1._subpath(fn)] = None
1162 result[t1._subpath(fn)] = None
1163
1163
1164 for fn, n2 in t2._files.iteritems():
1164 for fn, n2 in t2._files.iteritems():
1165 if fn not in t1._files:
1165 if fn not in t1._files:
1166 fl2 = t2._flags.get(fn, '')
1166 fl2 = t2._flags.get(fn, '')
1167 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
1167 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
1168
1168
1169 stackls = []
1169 stackls = []
1170 _iterativediff(self, m2, stackls)
1170 _iterativediff(self, m2, stackls)
1171 while stackls:
1171 while stackls:
1172 t1, t2 = stackls.pop()
1172 t1, t2 = stackls.pop()
1173 # stackls is populated in the function call
1173 # stackls is populated in the function call
1174 _iterativediff(t1, t2, stackls)
1174 _iterativediff(t1, t2, stackls)
1175 return result
1175 return result
1176
1176
1177 def unmodifiedsince(self, m2):
1177 def unmodifiedsince(self, m2):
1178 return not self._dirty and not m2._dirty and self._node == m2._node
1178 return not self._dirty and not m2._dirty and self._node == m2._node
1179
1179
1180 def parse(self, text, readsubtree):
1180 def parse(self, text, readsubtree):
1181 selflazy = self._lazydirs
1181 selflazy = self._lazydirs
1182 subpath = self._subpath
1182 subpath = self._subpath
1183 for f, n, fl in _parse(text):
1183 for f, n, fl in _parse(text):
1184 if fl == 't':
1184 if fl == 't':
1185 f = f + '/'
1185 f = f + '/'
1186 # False below means "doesn't need to be copied" and can use the
1186 # False below means "doesn't need to be copied" and can use the
1187 # cached value from readsubtree directly.
1187 # cached value from readsubtree directly.
1188 selflazy[f] = (subpath(f), n, readsubtree, False)
1188 selflazy[f] = (subpath(f), n, readsubtree, False)
1189 elif '/' in f:
1189 elif '/' in f:
1190 # This is a flat manifest, so use __setitem__ and setflag rather
1190 # This is a flat manifest, so use __setitem__ and setflag rather
1191 # than assigning directly to _files and _flags, so we can
1191 # than assigning directly to _files and _flags, so we can
1192 # assign a path in a subdirectory, and to mark dirty (compared
1192 # assign a path in a subdirectory, and to mark dirty (compared
1193 # to nullid).
1193 # to nullid).
1194 self[f] = n
1194 self[f] = n
1195 if fl:
1195 if fl:
1196 self.setflag(f, fl)
1196 self.setflag(f, fl)
1197 else:
1197 else:
1198 # Assigning to _files and _flags avoids marking as dirty,
1198 # Assigning to _files and _flags avoids marking as dirty,
1199 # and should be a little faster.
1199 # and should be a little faster.
1200 self._files[f] = n
1200 self._files[f] = n
1201 if fl:
1201 if fl:
1202 self._flags[f] = fl
1202 self._flags[f] = fl
1203
1203
1204 def text(self):
1204 def text(self):
1205 """Get the full data of this manifest as a bytestring."""
1205 """Get the full data of this manifest as a bytestring."""
1206 self._load()
1206 self._load()
1207 return _text(self.iterentries())
1207 return _text(self.iterentries())
1208
1208
1209 def dirtext(self):
1209 def dirtext(self):
1210 """Get the full data of this directory as a bytestring. Make sure that
1210 """Get the full data of this directory as a bytestring. Make sure that
1211 any submanifests have been written first, so their nodeids are correct.
1211 any submanifests have been written first, so their nodeids are correct.
1212 """
1212 """
1213 self._load()
1213 self._load()
1214 flags = self.flags
1214 flags = self.flags
1215 lazydirs = [(d[:-1], v[1], 't') for d, v in self._lazydirs.iteritems()]
1215 lazydirs = [(d[:-1], v[1], 't') for d, v in self._lazydirs.iteritems()]
1216 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
1216 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
1217 files = [(f, self._files[f], flags(f)) for f in self._files]
1217 files = [(f, self._files[f], flags(f)) for f in self._files]
1218 return _text(sorted(dirs + files + lazydirs))
1218 return _text(sorted(dirs + files + lazydirs))
1219
1219
1220 def read(self, gettext, readsubtree):
1220 def read(self, gettext, readsubtree):
1221 def _load_for_read(s):
1221 def _load_for_read(s):
1222 s.parse(gettext(), readsubtree)
1222 s.parse(gettext(), readsubtree)
1223 s._dirty = False
1223 s._dirty = False
1224 self._loadfunc = _load_for_read
1224 self._loadfunc = _load_for_read
1225
1225
1226 def writesubtrees(self, m1, m2, writesubtree, match):
1226 def writesubtrees(self, m1, m2, writesubtree, match):
1227 self._load() # for consistency; should never have any effect here
1227 self._load() # for consistency; should never have any effect here
1228 m1._load()
1228 m1._load()
1229 m2._load()
1229 m2._load()
1230 emptytree = treemanifest()
1230 emptytree = treemanifest()
1231 def getnode(m, d):
1231 def getnode(m, d):
1232 ld = m._lazydirs.get(d)
1232 ld = m._lazydirs.get(d)
1233 if ld:
1233 if ld:
1234 return ld[1]
1234 return ld[1]
1235 return m._dirs.get(d, emptytree)._node
1235 return m._dirs.get(d, emptytree)._node
1236
1236
1237 # let's skip investigating things that `match` says we do not need.
1237 # let's skip investigating things that `match` says we do not need.
1238 visit = match.visitchildrenset(self._dir[:-1] or '.')
1238 visit = match.visitchildrenset(self._dir[:-1] or '.')
1239 visit = self._loadchildrensetlazy(visit)
1239 visit = self._loadchildrensetlazy(visit)
1240 if visit == 'this' or visit == 'all':
1240 if visit == 'this' or visit == 'all':
1241 visit = None
1241 visit = None
1242 for d, subm in self._dirs.iteritems():
1242 for d, subm in self._dirs.iteritems():
1243 if visit and d[:-1] not in visit:
1243 if visit and d[:-1] not in visit:
1244 continue
1244 continue
1245 subp1 = getnode(m1, d)
1245 subp1 = getnode(m1, d)
1246 subp2 = getnode(m2, d)
1246 subp2 = getnode(m2, d)
1247 if subp1 == nullid:
1247 if subp1 == nullid:
1248 subp1, subp2 = subp2, subp1
1248 subp1, subp2 = subp2, subp1
1249 writesubtree(subm, subp1, subp2, match)
1249 writesubtree(subm, subp1, subp2, match)
1250
1250
1251 def walksubtrees(self, matcher=None):
1251 def walksubtrees(self, matcher=None):
1252 """Returns an iterator of the subtrees of this manifest, including this
1252 """Returns an iterator of the subtrees of this manifest, including this
1253 manifest itself.
1253 manifest itself.
1254
1254
1255 If `matcher` is provided, it only returns subtrees that match.
1255 If `matcher` is provided, it only returns subtrees that match.
1256 """
1256 """
1257 if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
1257 if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
1258 return
1258 return
1259 if not matcher or matcher(self._dir[:-1]):
1259 if not matcher or matcher(self._dir[:-1]):
1260 yield self
1260 yield self
1261
1261
1262 self._load()
1262 self._load()
1263 # OPT: use visitchildrenset to avoid loading everything.
1263 # OPT: use visitchildrenset to avoid loading everything.
1264 self._loadalllazy()
1264 self._loadalllazy()
1265 for d, subm in self._dirs.iteritems():
1265 for d, subm in self._dirs.iteritems():
1266 for subtree in subm.walksubtrees(matcher=matcher):
1266 for subtree in subm.walksubtrees(matcher=matcher):
1267 yield subtree
1267 yield subtree
1268
1268
class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """

    _file = 'manifestfulltextcache'

    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        # _dirty: in-memory state diverged from the backing file
        self._dirty = False
        # _read: the backing file has been loaded already
        self._read = False
        # _opener: vfs used to access the backing file (set externally)
        self._opener = None

    def read(self):
        """Load cache entries from the backing file, oldest first."""
        if self._read or self._opener is None:
            return

        try:
            with self._opener(self._file) as fp:
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is
                # skipped
                while True:
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack('>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        break
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False

    def write(self):
        """Persist the cache to disk (atomically), oldest entry first."""
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        with self._opener(self._file, 'w', atomictemp=True,
                          checkambig=True) as fp:
            node = self._head.prev
            while True:
                if node.key in self._cache:
                    fp.write(node.key)
                    fp.write(struct.pack('>L', len(node.value)))
                    fp.write(node.value)
                if node is self._head:
                    break
                node = node.prev

    def __len__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__len__()

    def __contains__(self, k):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__contains__(k)

    def __iter__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__iter__()

    def __getitem__(self, k):
        if not self._read:
            self.read()
        # the cache lru order can change on read
        setdirty = self._cache.get(k) is not self._head
        value = super(manifestfulltextcache, self).__getitem__(k)
        if setdirty:
            self._dirty = True
        return value

    def __setitem__(self, k, v):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__setitem__(k, v)
        self._dirty = True

    def __delitem__(self, k):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__delitem__(k)
        self._dirty = True

    def get(self, k, default=None):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).get(k, default=default)

    def clear(self, clear_persisted_data=False):
        super(manifestfulltextcache, self).clear()
        if clear_persisted_data:
            self._dirty = True
            self.write()
        self._read = False
1379
1379
1380 @interfaceutil.implementer(repository.imanifeststorage)
1380 @interfaceutil.implementer(repository.imanifeststorage)
1381 class manifestrevlog(object):
1381 class manifestrevlog(object):
1382 '''A revlog that stores manifest texts. This is responsible for caching the
1382 '''A revlog that stores manifest texts. This is responsible for caching the
1383 full-text manifest contents.
1383 full-text manifest contents.
1384 '''
1384 '''
1385 def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
1385 def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
1386 treemanifest=False):
1386 treemanifest=False):
1387 """Constructs a new manifest revlog
1387 """Constructs a new manifest revlog
1388
1388
1389 `indexfile` - used by extensions to have two manifests at once, like
1389 `indexfile` - used by extensions to have two manifests at once, like
1390 when transitioning between flatmanifeset and treemanifests.
1390 when transitioning between flatmanifeset and treemanifests.
1391
1391
1392 `treemanifest` - used to indicate this is a tree manifest revlog. Opener
1392 `treemanifest` - used to indicate this is a tree manifest revlog. Opener
1393 options can also be used to make this a tree manifest revlog. The opener
1393 options can also be used to make this a tree manifest revlog. The opener
1394 option takes precedence, so if it is set to True, we ignore whatever
1394 option takes precedence, so if it is set to True, we ignore whatever
1395 value is passed in to the constructor.
1395 value is passed in to the constructor.
1396 """
1396 """
1397 # During normal operations, we expect to deal with not more than four
1397 # During normal operations, we expect to deal with not more than four
1398 # revs at a time (such as during commit --amend). When rebasing large
1398 # revs at a time (such as during commit --amend). When rebasing large
1399 # stacks of commits, the number can go up, hence the config knob below.
1399 # stacks of commits, the number can go up, hence the config knob below.
1400 cachesize = 4
1400 cachesize = 4
1401 optiontreemanifest = False
1401 optiontreemanifest = False
1402 opts = getattr(opener, 'options', None)
1402 opts = getattr(opener, 'options', None)
1403 if opts is not None:
1403 if opts is not None:
1404 cachesize = opts.get('manifestcachesize', cachesize)
1404 cachesize = opts.get('manifestcachesize', cachesize)
1405 optiontreemanifest = opts.get('treemanifest', False)
1405 optiontreemanifest = opts.get('treemanifest', False)
1406
1406
1407 self._treeondisk = optiontreemanifest or treemanifest
1407 self._treeondisk = optiontreemanifest or treemanifest
1408
1408
1409 self._fulltextcache = manifestfulltextcache(cachesize)
1409 self._fulltextcache = manifestfulltextcache(cachesize)
1410
1410
1411 if tree:
1411 if tree:
1412 assert self._treeondisk, 'opts is %r' % opts
1412 assert self._treeondisk, 'opts is %r' % opts
1413
1413
1414 if indexfile is None:
1414 if indexfile is None:
1415 indexfile = '00manifest.i'
1415 indexfile = '00manifest.i'
1416 if tree:
1416 if tree:
1417 indexfile = "meta/" + tree + indexfile
1417 indexfile = "meta/" + tree + indexfile
1418
1418
1419 self.tree = tree
1419 self.tree = tree
1420
1420
1421 # The dirlogcache is kept on the root manifest log
1421 # The dirlogcache is kept on the root manifest log
1422 if tree:
1422 if tree:
1423 self._dirlogcache = dirlogcache
1423 self._dirlogcache = dirlogcache
1424 else:
1424 else:
1425 self._dirlogcache = {'': self}
1425 self._dirlogcache = {'': self}
1426
1426
1427 self._revlog = revlog.revlog(opener, indexfile,
1427 self._revlog = revlog.revlog(opener, indexfile,
1428 # only root indexfile is cached
1428 # only root indexfile is cached
1429 checkambig=not bool(tree),
1429 checkambig=not bool(tree),
1430 mmaplargeindex=True)
1430 mmaplargeindex=True)
1431
1431
1432 self.index = self._revlog.index
1432 self.index = self._revlog.index
1433 self.version = self._revlog.version
1433 self.version = self._revlog.version
1434 self._generaldelta = self._revlog._generaldelta
1434 self._generaldelta = self._revlog._generaldelta
1435
1435
1436 def _setupmanifestcachehooks(self, repo):
1436 def _setupmanifestcachehooks(self, repo):
1437 """Persist the manifestfulltextcache on lock release"""
1437 """Persist the manifestfulltextcache on lock release"""
1438 if not util.safehasattr(repo, '_lockref'):
1438 if not util.safehasattr(repo, '_wlockref'):
1439 return
1439 return
1440
1440
1441 self._fulltextcache._opener = repo.cachevfs
1441 self._fulltextcache._opener = repo.cachevfs
1442 if repo._currentlock(repo._lockref) is None:
1442 if repo._currentlock(repo._wlockref) is None:
1443 return
1443 return
1444
1444
1445 reporef = weakref.ref(repo)
1445 reporef = weakref.ref(repo)
1446 manifestrevlogref = weakref.ref(self)
1446 manifestrevlogref = weakref.ref(self)
1447
1447
1448 def persistmanifestcache():
1448 def persistmanifestcache():
1449 repo = reporef()
1449 repo = reporef()
1450 self = manifestrevlogref()
1450 self = manifestrevlogref()
1451 if repo is None or self is None:
1451 if repo is None or self is None:
1452 return
1452 return
1453 if repo.manifestlog.getstorage(b'') is not self:
1453 if repo.manifestlog.getstorage(b'') is not self:
1454 # there's a different manifest in play now, abort
1454 # there's a different manifest in play now, abort
1455 return
1455 return
1456 self._fulltextcache.write()
1456 self._fulltextcache.write()
1457
1457
1458 repo._afterlock(persistmanifestcache)
1458 repo._afterlock(persistmanifestcache)
1459
1459
1460 @property
1460 @property
1461 def fulltextcache(self):
1461 def fulltextcache(self):
1462 return self._fulltextcache
1462 return self._fulltextcache
1463
1463
1464 def clearcaches(self, clear_persisted_data=False):
1464 def clearcaches(self, clear_persisted_data=False):
1465 self._revlog.clearcaches()
1465 self._revlog.clearcaches()
1466 self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
1466 self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
1467 self._dirlogcache = {self.tree: self}
1467 self._dirlogcache = {self.tree: self}
1468
1468
1469 def dirlog(self, d):
1469 def dirlog(self, d):
1470 if d:
1470 if d:
1471 assert self._treeondisk
1471 assert self._treeondisk
1472 if d not in self._dirlogcache:
1472 if d not in self._dirlogcache:
1473 mfrevlog = manifestrevlog(self.opener, d,
1473 mfrevlog = manifestrevlog(self.opener, d,
1474 self._dirlogcache,
1474 self._dirlogcache,
1475 treemanifest=self._treeondisk)
1475 treemanifest=self._treeondisk)
1476 self._dirlogcache[d] = mfrevlog
1476 self._dirlogcache[d] = mfrevlog
1477 return self._dirlogcache[d]
1477 return self._dirlogcache[d]
1478
1478
1479 def add(self, m, transaction, link, p1, p2, added, removed, readtree=None,
1479 def add(self, m, transaction, link, p1, p2, added, removed, readtree=None,
1480 match=None):
1480 match=None):
1481 if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
1481 if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
1482 # If our first parent is in the manifest cache, we can
1482 # If our first parent is in the manifest cache, we can
1483 # compute a delta here using properties we know about the
1483 # compute a delta here using properties we know about the
1484 # manifest up-front, which may save time later for the
1484 # manifest up-front, which may save time later for the
1485 # revlog layer.
1485 # revlog layer.
1486
1486
1487 _checkforbidden(added)
1487 _checkforbidden(added)
1488 # combine the changed lists into one sorted iterator
1488 # combine the changed lists into one sorted iterator
1489 work = heapq.merge([(x, False) for x in added],
1489 work = heapq.merge([(x, False) for x in added],
1490 [(x, True) for x in removed])
1490 [(x, True) for x in removed])
1491
1491
1492 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1492 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1493 cachedelta = self._revlog.rev(p1), deltatext
1493 cachedelta = self._revlog.rev(p1), deltatext
1494 text = util.buffer(arraytext)
1494 text = util.buffer(arraytext)
1495 n = self._revlog.addrevision(text, transaction, link, p1, p2,
1495 n = self._revlog.addrevision(text, transaction, link, p1, p2,
1496 cachedelta)
1496 cachedelta)
1497 else:
1497 else:
1498 # The first parent manifest isn't already loaded, so we'll
1498 # The first parent manifest isn't already loaded, so we'll
1499 # just encode a fulltext of the manifest and pass that
1499 # just encode a fulltext of the manifest and pass that
1500 # through to the revlog layer, and let it handle the delta
1500 # through to the revlog layer, and let it handle the delta
1501 # process.
1501 # process.
1502 if self._treeondisk:
1502 if self._treeondisk:
1503 assert readtree, "readtree must be set for treemanifest writes"
1503 assert readtree, "readtree must be set for treemanifest writes"
1504 assert match, "match must be specified for treemanifest writes"
1504 assert match, "match must be specified for treemanifest writes"
1505 m1 = readtree(self.tree, p1)
1505 m1 = readtree(self.tree, p1)
1506 m2 = readtree(self.tree, p2)
1506 m2 = readtree(self.tree, p2)
1507 n = self._addtree(m, transaction, link, m1, m2, readtree,
1507 n = self._addtree(m, transaction, link, m1, m2, readtree,
1508 match=match)
1508 match=match)
1509 arraytext = None
1509 arraytext = None
1510 else:
1510 else:
1511 text = m.text()
1511 text = m.text()
1512 n = self._revlog.addrevision(text, transaction, link, p1, p2)
1512 n = self._revlog.addrevision(text, transaction, link, p1, p2)
1513 arraytext = bytearray(text)
1513 arraytext = bytearray(text)
1514
1514
1515 if arraytext is not None:
1515 if arraytext is not None:
1516 self.fulltextcache[n] = arraytext
1516 self.fulltextcache[n] = arraytext
1517
1517
1518 return n
1518 return n
1519
1519
1520 def _addtree(self, m, transaction, link, m1, m2, readtree, match):
1520 def _addtree(self, m, transaction, link, m1, m2, readtree, match):
1521 # If the manifest is unchanged compared to one parent,
1521 # If the manifest is unchanged compared to one parent,
1522 # don't write a new revision
1522 # don't write a new revision
1523 if self.tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(
1523 if self.tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(
1524 m2)):
1524 m2)):
1525 return m.node()
1525 return m.node()
1526 def writesubtree(subm, subp1, subp2, match):
1526 def writesubtree(subm, subp1, subp2, match):
1527 sublog = self.dirlog(subm.dir())
1527 sublog = self.dirlog(subm.dir())
1528 sublog.add(subm, transaction, link, subp1, subp2, None, None,
1528 sublog.add(subm, transaction, link, subp1, subp2, None, None,
1529 readtree=readtree, match=match)
1529 readtree=readtree, match=match)
1530 m.writesubtrees(m1, m2, writesubtree, match)
1530 m.writesubtrees(m1, m2, writesubtree, match)
1531 text = m.dirtext()
1531 text = m.dirtext()
1532 n = None
1532 n = None
1533 if self.tree != '':
1533 if self.tree != '':
1534 # Double-check whether contents are unchanged to one parent
1534 # Double-check whether contents are unchanged to one parent
1535 if text == m1.dirtext():
1535 if text == m1.dirtext():
1536 n = m1.node()
1536 n = m1.node()
1537 elif text == m2.dirtext():
1537 elif text == m2.dirtext():
1538 n = m2.node()
1538 n = m2.node()
1539
1539
1540 if not n:
1540 if not n:
1541 n = self._revlog.addrevision(text, transaction, link, m1.node(),
1541 n = self._revlog.addrevision(text, transaction, link, m1.node(),
1542 m2.node())
1542 m2.node())
1543
1543
1544 # Save nodeid so parent manifest can calculate its nodeid
1544 # Save nodeid so parent manifest can calculate its nodeid
1545 m.setnode(n)
1545 m.setnode(n)
1546 return n
1546 return n
1547
1547
1548 def __len__(self):
1548 def __len__(self):
1549 return len(self._revlog)
1549 return len(self._revlog)
1550
1550
1551 def __iter__(self):
1551 def __iter__(self):
1552 return self._revlog.__iter__()
1552 return self._revlog.__iter__()
1553
1553
1554 def rev(self, node):
1554 def rev(self, node):
1555 return self._revlog.rev(node)
1555 return self._revlog.rev(node)
1556
1556
1557 def node(self, rev):
1557 def node(self, rev):
1558 return self._revlog.node(rev)
1558 return self._revlog.node(rev)
1559
1559
1560 def lookup(self, value):
1560 def lookup(self, value):
1561 return self._revlog.lookup(value)
1561 return self._revlog.lookup(value)
1562
1562
1563 def parentrevs(self, rev):
1563 def parentrevs(self, rev):
1564 return self._revlog.parentrevs(rev)
1564 return self._revlog.parentrevs(rev)
1565
1565
1566 def parents(self, node):
1566 def parents(self, node):
1567 return self._revlog.parents(node)
1567 return self._revlog.parents(node)
1568
1568
1569 def linkrev(self, rev):
1569 def linkrev(self, rev):
1570 return self._revlog.linkrev(rev)
1570 return self._revlog.linkrev(rev)
1571
1571
1572 def checksize(self):
1572 def checksize(self):
1573 return self._revlog.checksize()
1573 return self._revlog.checksize()
1574
1574
1575 def revision(self, node, _df=None, raw=False):
1575 def revision(self, node, _df=None, raw=False):
1576 return self._revlog.revision(node, _df=_df, raw=raw)
1576 return self._revlog.revision(node, _df=_df, raw=raw)
1577
1577
1578 def revdiff(self, rev1, rev2):
1578 def revdiff(self, rev1, rev2):
1579 return self._revlog.revdiff(rev1, rev2)
1579 return self._revlog.revdiff(rev1, rev2)
1580
1580
1581 def cmp(self, node, text):
1581 def cmp(self, node, text):
1582 return self._revlog.cmp(node, text)
1582 return self._revlog.cmp(node, text)
1583
1583
1584 def deltaparent(self, rev):
1584 def deltaparent(self, rev):
1585 return self._revlog.deltaparent(rev)
1585 return self._revlog.deltaparent(rev)
1586
1586
1587 def emitrevisions(self, nodes, nodesorder=None,
1587 def emitrevisions(self, nodes, nodesorder=None,
1588 revisiondata=False, assumehaveparentrevisions=False,
1588 revisiondata=False, assumehaveparentrevisions=False,
1589 deltamode=repository.CG_DELTAMODE_STD):
1589 deltamode=repository.CG_DELTAMODE_STD):
1590 return self._revlog.emitrevisions(
1590 return self._revlog.emitrevisions(
1591 nodes, nodesorder=nodesorder, revisiondata=revisiondata,
1591 nodes, nodesorder=nodesorder, revisiondata=revisiondata,
1592 assumehaveparentrevisions=assumehaveparentrevisions,
1592 assumehaveparentrevisions=assumehaveparentrevisions,
1593 deltamode=deltamode)
1593 deltamode=deltamode)
1594
1594
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        # Pure delegation to the wrapped revlog.
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)
1598
1598
    def rawsize(self, rev):
        # Pure delegation to the wrapped revlog.
        return self._revlog.rawsize(rev)
1601
1601
    def getstrippoint(self, minlink):
        # Pure delegation to the wrapped revlog.
        return self._revlog.getstrippoint(minlink)
1604
1604
    def strip(self, minlink, transaction):
        # Pure delegation to the wrapped revlog.
        return self._revlog.strip(minlink, transaction)
1607
1607
    def files(self):
        # Pure delegation to the wrapped revlog.
        return self._revlog.files()
1610
1610
    def clone(self, tr, destrevlog, **kwargs):
        # Cloning only makes sense into another manifestrevlog wrapper; the
        # actual copy is performed by the underlying revlogs.
        if not isinstance(destrevlog, manifestrevlog):
            raise error.ProgrammingError('expected manifestrevlog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
1616
1616
    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        # Pure delegation to the wrapped revlog, forwarding every flag.
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
            revisionscount=revisionscount, trackedsize=trackedsize,
            storedsize=storedsize)
1624
1624
    @property
    def indexfile(self):
        # Expose the wrapped revlog's index file attribute.
        return self._revlog.indexfile
1628
1628
    @indexfile.setter
    def indexfile(self, value):
        # Write through to the wrapped revlog.
        self._revlog.indexfile = value
1632
1632
    @property
    def opener(self):
        # Expose the wrapped revlog's opener (vfs) attribute.
        return self._revlog.opener
1636
1636
    @opener.setter
    def opener(self, value):
        # Write through to the wrapped revlog.
        self._revlog.opener = value
1640
1640
@interfaceutil.implementer(repository.imanifestlog)
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo, rootstore, narrowmatch):
        # Defaults, possibly overridden by options attached to the opener.
        usetreemanifest = False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            cachesize = opts.get('manifestcachesize', cachesize)

        self._treemanifests = usetreemanifest

        self._rootstore = rootstore
        self._rootstore._setupmanifestcachehooks(repo)
        self._narrowmatch = narrowmatch

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[''] = util.lrucachedict(cachesize)

        self._cachesize = cachesize

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get('', node)

    def get(self, tree, node, verify=True):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.

        `verify` - if True an exception will be thrown if the node is not in
                   the revlog
        """
        # Fast path: previously constructed context for this (tree, node).
        if node in self._dirmancache.get(tree, ()):
            return self._dirmancache[tree][node]

        # Directories outside the narrowspec get a stand-in context.
        if not self._narrowmatch.always():
            if not self._narrowmatch.visitdir(tree[:-1] or '.'):
                return excludeddirmanifestctx(tree, node)
        if tree:
            if self._rootstore._treeondisk:
                if verify:
                    # Side-effect is LookupError is raised if node doesn't
                    # exist.
                    self.getstorage(tree).rev(node)

                m = treemanifestctx(self, tree, node)
            else:
                # Subdirectory manifests only exist with on-disk trees.
                raise error.Abort(
                        _("cannot ask for manifest directory '%s' in a flat "
                          "manifest") % tree)
        else:
            if verify:
                # Side-effect is LookupError is raised if node doesn't exist.
                self._rootstore.rev(node)

            if self._treemanifests:
                m = treemanifestctx(self, '', node)
            else:
                m = manifestctx(self, node)

        # Cache the context, except for the null revision.
        if node != nullid:
            mancache = self._dirmancache.get(tree)
            if not mancache:
                # Per-directory caches are created lazily on first use.
                mancache = util.lrucachedict(self._cachesize)
                self._dirmancache[tree] = mancache
            mancache[node] = m
        return m

    def getstorage(self, tree):
        # Storage (dirlog) backing the given directory; '' is the root.
        return self._rootstore.dirlog(tree)

    def clearcaches(self, clear_persisted_data=False):
        # Drop in-memory contexts, then ask the root store to clear its own
        # caches (optionally including persisted data).
        self._dirmancache.clear()
        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)

    def rev(self, node):
        # Revision number of ``node`` in the root store.
        return self._rootstore.rev(node)
1729
1729
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
    """An in-memory, writable revision of a flat manifest."""

    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict()

    def _storage(self):
        # Flat manifests always live in the root directory storage.
        return self._manifestlog.getstorage(b'')

    def new(self):
        # A fresh, empty in-memory manifest bound to the same log.
        return memmanifestctx(self._manifestlog)

    def copy(self):
        clone = memmanifestctx(self._manifestlog)
        clone._manifestdict = self.read().copy()
        return clone

    def read(self):
        # Hand back the backing manifestdict itself (not a copy).
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        # Persist the in-memory manifest into the root storage.
        store = self._storage()
        return store.add(self._manifestdict, transaction, link,
                         p1, p2, added, removed, match=match)
1753
1753
@interfaceutil.implementer(repository.imanifestrevisionstored)
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        # Parsed manifestdict, populated lazily by read().
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = store.parents(node)
        #rev = store.rev(node)
        #self.linkrev = store.linkrev(rev)

    def _storage(self):
        # Flat manifests are stored in the root directory storage.
        return self._manifestlog.getstorage(b'')

    def node(self):
        return self._node

    def new(self):
        # A writable, empty in-memory manifest bound to the same log.
        return memmanifestctx(self._manifestlog)

    def copy(self):
        # A writable copy of this revision's contents.
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        # (p1, p2) node pair, computed once then cached on the instance.
        return self._storage().parents(self._node)

    def read(self):
        """Return (and lazily load/cache) the manifestdict for this node."""
        if self._data is None:
            if self._node == nullid:
                # The null revision is always an empty manifest.
                self._data = manifestdict()
            else:
                store = self._storage()
                if self._node in store.fulltextcache:
                    # Cache holds a bytearray; convert back to bytes.
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    # Populate the fulltext cache for future readers.
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            # Stored delta is against a parent: cheap to read as a delta.
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        # Look up a single path; delegates to manifestdict.find().
        return self.read().find(key)
1833
1833
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memtreemanifestctx(object):
    """An in-memory, writable revision of one directory of a tree manifest."""

    def __init__(self, manifestlog, dir=''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest()

    def _storage(self):
        # Writes are routed through the root directory storage.
        return self._manifestlog.getstorage(b'')

    def new(self, dir=''):
        # A fresh, empty in-memory tree for the requested directory.
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        clone = memtreemanifestctx(self._manifestlog, dir=self._dir)
        clone._treemanifest = self._treemanifest.copy()
        return clone

    def read(self):
        # Hand back the backing treemanifest itself (not a copy).
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        def readtree(dir, node):
            # Resolve subtrees via the manifest log during the write.
            return self._manifestlog.get(dir, node).read()

        store = self._storage()
        return store.add(self._treemanifest, transaction, link,
                         p1, p2, added, removed, readtree=readtree,
                         match=match)
1861
1861
@interfaceutil.implementer(repository.imanifestrevisionstored)
class treemanifestctx(object):
    """A stored revision of one directory of a tree manifest, loaded lazily."""
    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        self._dir = dir
        # Parsed treemanifest, populated lazily by read().
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        #self.p1, self.p2 = store.parents(node)
        #rev = store.rev(node)
        #self.linkrev = store.linkrev(rev)

    def _storage(self):
        # Directories outside the narrowspec get a stand-in storage that
        # refuses reads and ignores writes.
        narrowmatch = self._manifestlog._narrowmatch
        if not narrowmatch.always():
            if not narrowmatch.visitdir(self._dir[:-1] or '.'):
                return excludedmanifestrevlog(self._dir)
        return self._manifestlog.getstorage(self._dir)

    def read(self):
        """Return (and lazily load/cache) the treemanifest for this node."""
        if self._data is None:
            store = self._storage()
            if self._node == nullid:
                # The null revision is always an empty tree.
                self._data = treemanifest()
            # TODO accessing non-public API
            elif store._treeondisk:
                # Trees are stored per-directory; assemble recursively.
                m = treemanifest(dir=self._dir)
                def gettext():
                    return store.revision(self._node)
                def readsubtree(dir, subm):
                    # Set verify to False since we need to be able to create
                    # subtrees for trees that don't exist on disk.
                    return self._manifestlog.get(dir, subm, verify=False).read()
                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                # Flat storage: parse the full text (via cache if possible).
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        return self._node

    def new(self, dir=''):
        # A writable, empty in-memory tree for the requested directory.
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        # A writable copy of this revision's contents.
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        # (p1, p2) node pair, computed once then cached on the instance.
        return self._storage().parents(self._node)

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        '''
        store = self._storage()
        if shallow:
            r = store.rev(self._node)
            d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
            return manifestdict(d)
        else:
            # Need to perform a slow delta
            r0 = store.deltaparent(store.rev(self._node))
            m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
            m1 = self.read()
            md = treemanifest(dir=self._dir)
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if (deltaparent != nullrev and
            deltaparent in store.parentrevs(r)):
            # Stored delta is against a parent: cheap to read as a delta.
            return self.readdelta(shallow=shallow)

        if shallow:
            # Only this directory's entries, parsed as a flat manifestdict.
            return manifestdict(store.revision(self._node))
        else:
            return self.read()

    def find(self, key):
        # Look up a single path; delegates to treemanifest.find().
        return self.read().find(key)
1979
1979
class excludeddir(treemanifest):
    """Stand-in for a directory that is excluded from the repository.

    With narrowing active on a repository that uses treemanifests,
    some of the directory revlogs will be excluded from the resulting
    clone. This is a huge storage win for clients, but means we need
    some sort of pseudo-manifest to surface to internals so we can
    detect a merge conflict outside the narrowspec. That's what this
    class is: it stands in for a directory whose node is known, but
    whose contents are unknown.
    """
    def __init__(self, dir, node):
        super(excludeddir, self).__init__(dir)
        self._node = node
        # Add an empty file, which will be included by iterators and such,
        # appearing as the directory itself (i.e. something like "dir/")
        self._files[''] = node
        self._flags[''] = 't'  # 't' marks the entry as a (sub)tree

    # Manifests outside the narrowspec should never be modified, so avoid
    # copying. This makes a noticeable difference when there are very many
    # directories outside the narrowspec. Also, it makes sense for the copy to
    # be of the same type as the original, which would not happen with the
    # super type's copy().
    def copy(self):
        return self
2006
2006
class excludeddirmanifestctx(treemanifestctx):
    """Manifest context for a directory excluded by narrowing.

    See the excludeddir docstring for the rationale."""

    def __init__(self, dir, node):
        self._dir = dir
        self._node = node

    def read(self):
        # Materialize the stand-in manifest for this excluded directory.
        return excludeddir(self._dir, self._node)

    def write(self, *args):
        # Writing outside the narrowspec is always a caller bug.
        msg = 'attempt to write manifest from excluded dir %s' % self._dir
        raise error.ProgrammingError(msg)
2019
2019
class excludedmanifestrevlog(manifestrevlog):
    """Stand-in for excluded treemanifest revlogs.

    When narrowing is active on a treemanifest repository, we'll have
    references to directories we can't see due to the revlog being
    skipped. This class conforms to the manifestrevlog interface for
    those directories so callers don't need special cases, while
    proactively refusing reads of — and writes to — data outside the
    narrowspec.
    """

    def __init__(self, dir):
        self._dir = dir

    def __len__(self):
        # Any size query on an excluded dirlog is a caller bug.
        msg = 'attempt to get length of excluded dir %s' % self._dir
        raise error.ProgrammingError(msg)

    def rev(self, node):
        # Any revision lookup on an excluded dirlog is a caller bug.
        msg = 'attempt to get rev from excluded dir %s' % self._dir
        raise error.ProgrammingError(msg)

    def linkrev(self, node):
        # Any linkrev lookup on an excluded dirlog is a caller bug.
        msg = 'attempt to get linkrev from excluded dir %s' % self._dir
        raise error.ProgrammingError(msg)

    def node(self, rev):
        # Any node lookup on an excluded dirlog is a caller bug.
        msg = 'attempt to get node from excluded dir %s' % self._dir
        raise error.ProgrammingError(msg)

    def add(self, *args, **kwargs):
        # We should never write entries in dirlogs outside the narrow clone,
        # but writesubtree() in _addtree() still calls this method, so it has
        # to be accepted and silently ignored. Ideally that path would avoid
        # calling add() with a clean manifest (_dirty is always False in
        # excludeddir instances).
        pass
@@ -1,1293 +1,1294 b''
1 #testcases sshv1 sshv2
1 #testcases sshv1 sshv2
2
2
3 #if sshv2
3 #if sshv2
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [experimental]
5 > [experimental]
6 > sshpeer.advertise-v2 = true
6 > sshpeer.advertise-v2 = true
7 > sshserver.support-v2 = true
7 > sshserver.support-v2 = true
8 > EOF
8 > EOF
9 #endif
9 #endif
10
10
11 Prepare repo a:
11 Prepare repo a:
12
12
13 $ hg init a
13 $ hg init a
14 $ cd a
14 $ cd a
15 $ echo a > a
15 $ echo a > a
16 $ hg add a
16 $ hg add a
17 $ hg commit -m test
17 $ hg commit -m test
18 $ echo first line > b
18 $ echo first line > b
19 $ hg add b
19 $ hg add b
20
20
21 Create a non-inlined filelog:
21 Create a non-inlined filelog:
22
22
23 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
23 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
25 > cat data1 >> b
25 > cat data1 >> b
26 > hg commit -m test
26 > hg commit -m test
27 > done
27 > done
28
28
29 List files in store/data (should show a 'b.d'):
29 List files in store/data (should show a 'b.d'):
30
30
31 #if reporevlogstore
31 #if reporevlogstore
32 $ for i in .hg/store/data/*; do
32 $ for i in .hg/store/data/*; do
33 > echo $i
33 > echo $i
34 > done
34 > done
35 .hg/store/data/a.i
35 .hg/store/data/a.i
36 .hg/store/data/b.d
36 .hg/store/data/b.d
37 .hg/store/data/b.i
37 .hg/store/data/b.i
38 #endif
38 #endif
39
39
40 Trigger branchcache creation:
40 Trigger branchcache creation:
41
41
42 $ hg branches
42 $ hg branches
43 default 10:a7949464abda
43 default 10:a7949464abda
44 $ ls .hg/cache
44 $ ls .hg/cache
45 branch2-served
45 branch2-served
46 manifestfulltextcache (reporevlogstore !)
46 manifestfulltextcache (reporevlogstore !)
47 rbc-names-v1
47 rbc-names-v1
48 rbc-revs-v1
48 rbc-revs-v1
49
49
50 Default operation:
50 Default operation:
51
51
52 $ hg clone . ../b
52 $ hg clone . ../b
53 updating to branch default
53 updating to branch default
54 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 $ cd ../b
55 $ cd ../b
56
56
57 Ensure branchcache got copied over:
57 Ensure branchcache got copied over:
58
58
59 $ ls .hg/cache
59 $ ls .hg/cache
60 branch2-served
60 branch2-served
61 manifestfulltextcache
61 rbc-names-v1
62 rbc-names-v1
62 rbc-revs-v1
63 rbc-revs-v1
63
64
64 $ cat a
65 $ cat a
65 a
66 a
66 $ hg verify
67 $ hg verify
67 checking changesets
68 checking changesets
68 checking manifests
69 checking manifests
69 crosschecking files in changesets and manifests
70 crosschecking files in changesets and manifests
70 checking files
71 checking files
71 checked 11 changesets with 11 changes to 2 files
72 checked 11 changesets with 11 changes to 2 files
72
73
73 Invalid dest '' must abort:
74 Invalid dest '' must abort:
74
75
75 $ hg clone . ''
76 $ hg clone . ''
76 abort: empty destination path is not valid
77 abort: empty destination path is not valid
77 [255]
78 [255]
78
79
79 No update, with debug option:
80 No update, with debug option:
80
81
81 #if hardlink
82 #if hardlink
82 $ hg --debug clone -U . ../c --config progress.debug=true
83 $ hg --debug clone -U . ../c --config progress.debug=true
83 linking: 1 files
84 linking: 1 files
84 linking: 2 files
85 linking: 2 files
85 linking: 3 files
86 linking: 3 files
86 linking: 4 files
87 linking: 4 files
87 linking: 5 files
88 linking: 5 files
88 linking: 6 files
89 linking: 6 files
89 linking: 7 files
90 linking: 7 files
90 linking: 8 files
91 linking: 8 files
91 linked 8 files (reporevlogstore !)
92 linked 8 files (reporevlogstore !)
92 linking: 9 files (reposimplestore !)
93 linking: 9 files (reposimplestore !)
93 linking: 10 files (reposimplestore !)
94 linking: 10 files (reposimplestore !)
94 linking: 11 files (reposimplestore !)
95 linking: 11 files (reposimplestore !)
95 linking: 12 files (reposimplestore !)
96 linking: 12 files (reposimplestore !)
96 linking: 13 files (reposimplestore !)
97 linking: 13 files (reposimplestore !)
97 linking: 14 files (reposimplestore !)
98 linking: 14 files (reposimplestore !)
98 linking: 15 files (reposimplestore !)
99 linking: 15 files (reposimplestore !)
99 linking: 16 files (reposimplestore !)
100 linking: 16 files (reposimplestore !)
100 linking: 17 files (reposimplestore !)
101 linking: 17 files (reposimplestore !)
101 linking: 18 files (reposimplestore !)
102 linking: 18 files (reposimplestore !)
102 linked 18 files (reposimplestore !)
103 linked 18 files (reposimplestore !)
103 #else
104 #else
104 $ hg --debug clone -U . ../c --config progress.debug=true
105 $ hg --debug clone -U . ../c --config progress.debug=true
105 linking: 1 files
106 linking: 1 files
106 copying: 2 files
107 copying: 2 files
107 copying: 3 files
108 copying: 3 files
108 copying: 4 files
109 copying: 4 files
109 copying: 5 files
110 copying: 5 files
110 copying: 6 files
111 copying: 6 files
111 copying: 7 files
112 copying: 7 files
112 copying: 8 files
113 copying: 8 files
113 copied 8 files (reporevlogstore !)
114 copied 8 files (reporevlogstore !)
114 copying: 9 files (reposimplestore !)
115 copying: 9 files (reposimplestore !)
115 copying: 10 files (reposimplestore !)
116 copying: 10 files (reposimplestore !)
116 copying: 11 files (reposimplestore !)
117 copying: 11 files (reposimplestore !)
117 copying: 12 files (reposimplestore !)
118 copying: 12 files (reposimplestore !)
118 copying: 13 files (reposimplestore !)
119 copying: 13 files (reposimplestore !)
119 copying: 14 files (reposimplestore !)
120 copying: 14 files (reposimplestore !)
120 copying: 15 files (reposimplestore !)
121 copying: 15 files (reposimplestore !)
121 copying: 16 files (reposimplestore !)
122 copying: 16 files (reposimplestore !)
122 copying: 17 files (reposimplestore !)
123 copying: 17 files (reposimplestore !)
123 copying: 18 files (reposimplestore !)
124 copying: 18 files (reposimplestore !)
124 copied 18 files (reposimplestore !)
125 copied 18 files (reposimplestore !)
125 #endif
126 #endif
126 $ cd ../c
127 $ cd ../c
127
128
128 Ensure branchcache got copied over:
129 Ensure branchcache got copied over:
129
130
130 $ ls .hg/cache
131 $ ls .hg/cache
131 branch2-served
132 branch2-served
132 rbc-names-v1
133 rbc-names-v1
133 rbc-revs-v1
134 rbc-revs-v1
134
135
135 $ cat a 2>/dev/null || echo "a not present"
136 $ cat a 2>/dev/null || echo "a not present"
136 a not present
137 a not present
137 $ hg verify
138 $ hg verify
138 checking changesets
139 checking changesets
139 checking manifests
140 checking manifests
140 crosschecking files in changesets and manifests
141 crosschecking files in changesets and manifests
141 checking files
142 checking files
142 checked 11 changesets with 11 changes to 2 files
143 checked 11 changesets with 11 changes to 2 files
143
144
144 Default destination:
145 Default destination:
145
146
146 $ mkdir ../d
147 $ mkdir ../d
147 $ cd ../d
148 $ cd ../d
148 $ hg clone ../a
149 $ hg clone ../a
149 destination directory: a
150 destination directory: a
150 updating to branch default
151 updating to branch default
151 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
152 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
152 $ cd a
153 $ cd a
153 $ hg cat a
154 $ hg cat a
154 a
155 a
155 $ cd ../..
156 $ cd ../..
156
157
157 Check that we drop the 'file:' from the path before writing the .hgrc:
158 Check that we drop the 'file:' from the path before writing the .hgrc:
158
159
159 $ hg clone file:a e
160 $ hg clone file:a e
160 updating to branch default
161 updating to branch default
161 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
162 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
162 $ grep 'file:' e/.hg/hgrc
163 $ grep 'file:' e/.hg/hgrc
163 [1]
164 [1]
164
165
165 Check that path aliases are expanded:
166 Check that path aliases are expanded:
166
167
167 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
168 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
168 $ hg -R f showconfig paths.default
169 $ hg -R f showconfig paths.default
169 $TESTTMP/a#0
170 $TESTTMP/a#0
170
171
171 Use --pull:
172 Use --pull:
172
173
173 $ hg clone --pull a g
174 $ hg clone --pull a g
174 requesting all changes
175 requesting all changes
175 adding changesets
176 adding changesets
176 adding manifests
177 adding manifests
177 adding file changes
178 adding file changes
178 added 11 changesets with 11 changes to 2 files
179 added 11 changesets with 11 changes to 2 files
179 new changesets acb14030fe0a:a7949464abda
180 new changesets acb14030fe0a:a7949464abda
180 updating to branch default
181 updating to branch default
181 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
182 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
182 $ hg -R g verify
183 $ hg -R g verify
183 checking changesets
184 checking changesets
184 checking manifests
185 checking manifests
185 crosschecking files in changesets and manifests
186 crosschecking files in changesets and manifests
186 checking files
187 checking files
187 checked 11 changesets with 11 changes to 2 files
188 checked 11 changesets with 11 changes to 2 files
188
189
189 Invalid dest '' with --pull must abort (issue2528):
190 Invalid dest '' with --pull must abort (issue2528):
190
191
191 $ hg clone --pull a ''
192 $ hg clone --pull a ''
192 abort: empty destination path is not valid
193 abort: empty destination path is not valid
193 [255]
194 [255]
194
195
195 Clone to '.':
196 Clone to '.':
196
197
197 $ mkdir h
198 $ mkdir h
198 $ cd h
199 $ cd h
199 $ hg clone ../a .
200 $ hg clone ../a .
200 updating to branch default
201 updating to branch default
201 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
202 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
202 $ cd ..
203 $ cd ..
203
204
204
205
205 *** Tests for option -u ***
206 *** Tests for option -u ***
206
207
207 Adding some more history to repo a:
208 Adding some more history to repo a:
208
209
209 $ cd a
210 $ cd a
210 $ hg tag ref1
211 $ hg tag ref1
211 $ echo the quick brown fox >a
212 $ echo the quick brown fox >a
212 $ hg ci -m "hacked default"
213 $ hg ci -m "hacked default"
213 $ hg up ref1
214 $ hg up ref1
214 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
215 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
215 $ hg branch stable
216 $ hg branch stable
216 marked working directory as branch stable
217 marked working directory as branch stable
217 (branches are permanent and global, did you want a bookmark?)
218 (branches are permanent and global, did you want a bookmark?)
218 $ echo some text >a
219 $ echo some text >a
219 $ hg ci -m "starting branch stable"
220 $ hg ci -m "starting branch stable"
220 $ hg tag ref2
221 $ hg tag ref2
221 $ echo some more text >a
222 $ echo some more text >a
222 $ hg ci -m "another change for branch stable"
223 $ hg ci -m "another change for branch stable"
223 $ hg up ref2
224 $ hg up ref2
224 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
225 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
225 $ hg parents
226 $ hg parents
226 changeset: 13:e8ece76546a6
227 changeset: 13:e8ece76546a6
227 branch: stable
228 branch: stable
228 tag: ref2
229 tag: ref2
229 parent: 10:a7949464abda
230 parent: 10:a7949464abda
230 user: test
231 user: test
231 date: Thu Jan 01 00:00:00 1970 +0000
232 date: Thu Jan 01 00:00:00 1970 +0000
232 summary: starting branch stable
233 summary: starting branch stable
233
234
234
235
235 Repo a has two heads:
236 Repo a has two heads:
236
237
237 $ hg heads
238 $ hg heads
238 changeset: 15:0aae7cf88f0d
239 changeset: 15:0aae7cf88f0d
239 branch: stable
240 branch: stable
240 tag: tip
241 tag: tip
241 user: test
242 user: test
242 date: Thu Jan 01 00:00:00 1970 +0000
243 date: Thu Jan 01 00:00:00 1970 +0000
243 summary: another change for branch stable
244 summary: another change for branch stable
244
245
245 changeset: 12:f21241060d6a
246 changeset: 12:f21241060d6a
246 user: test
247 user: test
247 date: Thu Jan 01 00:00:00 1970 +0000
248 date: Thu Jan 01 00:00:00 1970 +0000
248 summary: hacked default
249 summary: hacked default
249
250
250
251
251 $ cd ..
252 $ cd ..
252
253
253
254
254 Testing --noupdate with --updaterev (must abort):
255 Testing --noupdate with --updaterev (must abort):
255
256
256 $ hg clone --noupdate --updaterev 1 a ua
257 $ hg clone --noupdate --updaterev 1 a ua
257 abort: cannot specify both --noupdate and --updaterev
258 abort: cannot specify both --noupdate and --updaterev
258 [255]
259 [255]
259
260
260
261
261 Testing clone -u:
262 Testing clone -u:
262
263
263 $ hg clone -u . a ua
264 $ hg clone -u . a ua
264 updating to branch stable
265 updating to branch stable
265 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
266 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
266
267
267 Repo ua has both heads:
268 Repo ua has both heads:
268
269
269 $ hg -R ua heads
270 $ hg -R ua heads
270 changeset: 15:0aae7cf88f0d
271 changeset: 15:0aae7cf88f0d
271 branch: stable
272 branch: stable
272 tag: tip
273 tag: tip
273 user: test
274 user: test
274 date: Thu Jan 01 00:00:00 1970 +0000
275 date: Thu Jan 01 00:00:00 1970 +0000
275 summary: another change for branch stable
276 summary: another change for branch stable
276
277
277 changeset: 12:f21241060d6a
278 changeset: 12:f21241060d6a
278 user: test
279 user: test
279 date: Thu Jan 01 00:00:00 1970 +0000
280 date: Thu Jan 01 00:00:00 1970 +0000
280 summary: hacked default
281 summary: hacked default
281
282
282
283
283 Same revision checked out in repo a and ua:
284 Same revision checked out in repo a and ua:
284
285
285 $ hg -R a parents --template "{node|short}\n"
286 $ hg -R a parents --template "{node|short}\n"
286 e8ece76546a6
287 e8ece76546a6
287 $ hg -R ua parents --template "{node|short}\n"
288 $ hg -R ua parents --template "{node|short}\n"
288 e8ece76546a6
289 e8ece76546a6
289
290
290 $ rm -r ua
291 $ rm -r ua
291
292
292
293
293 Testing clone --pull -u:
294 Testing clone --pull -u:
294
295
295 $ hg clone --pull -u . a ua
296 $ hg clone --pull -u . a ua
296 requesting all changes
297 requesting all changes
297 adding changesets
298 adding changesets
298 adding manifests
299 adding manifests
299 adding file changes
300 adding file changes
300 added 16 changesets with 16 changes to 3 files (+1 heads)
301 added 16 changesets with 16 changes to 3 files (+1 heads)
301 new changesets acb14030fe0a:0aae7cf88f0d
302 new changesets acb14030fe0a:0aae7cf88f0d
302 updating to branch stable
303 updating to branch stable
303 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
304 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
304
305
305 Repo ua has both heads:
306 Repo ua has both heads:
306
307
307 $ hg -R ua heads
308 $ hg -R ua heads
308 changeset: 15:0aae7cf88f0d
309 changeset: 15:0aae7cf88f0d
309 branch: stable
310 branch: stable
310 tag: tip
311 tag: tip
311 user: test
312 user: test
312 date: Thu Jan 01 00:00:00 1970 +0000
313 date: Thu Jan 01 00:00:00 1970 +0000
313 summary: another change for branch stable
314 summary: another change for branch stable
314
315
315 changeset: 12:f21241060d6a
316 changeset: 12:f21241060d6a
316 user: test
317 user: test
317 date: Thu Jan 01 00:00:00 1970 +0000
318 date: Thu Jan 01 00:00:00 1970 +0000
318 summary: hacked default
319 summary: hacked default
319
320
320
321
321 Same revision checked out in repo a and ua:
322 Same revision checked out in repo a and ua:
322
323
323 $ hg -R a parents --template "{node|short}\n"
324 $ hg -R a parents --template "{node|short}\n"
324 e8ece76546a6
325 e8ece76546a6
325 $ hg -R ua parents --template "{node|short}\n"
326 $ hg -R ua parents --template "{node|short}\n"
326 e8ece76546a6
327 e8ece76546a6
327
328
328 $ rm -r ua
329 $ rm -r ua
329
330
330
331
331 Testing clone -u <branch>:
332 Testing clone -u <branch>:
332
333
333 $ hg clone -u stable a ua
334 $ hg clone -u stable a ua
334 updating to branch stable
335 updating to branch stable
335 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
336 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
336
337
337 Repo ua has both heads:
338 Repo ua has both heads:
338
339
339 $ hg -R ua heads
340 $ hg -R ua heads
340 changeset: 15:0aae7cf88f0d
341 changeset: 15:0aae7cf88f0d
341 branch: stable
342 branch: stable
342 tag: tip
343 tag: tip
343 user: test
344 user: test
344 date: Thu Jan 01 00:00:00 1970 +0000
345 date: Thu Jan 01 00:00:00 1970 +0000
345 summary: another change for branch stable
346 summary: another change for branch stable
346
347
347 changeset: 12:f21241060d6a
348 changeset: 12:f21241060d6a
348 user: test
349 user: test
349 date: Thu Jan 01 00:00:00 1970 +0000
350 date: Thu Jan 01 00:00:00 1970 +0000
350 summary: hacked default
351 summary: hacked default
351
352
352
353
353 Branch 'stable' is checked out:
354 Branch 'stable' is checked out:
354
355
355 $ hg -R ua parents
356 $ hg -R ua parents
356 changeset: 15:0aae7cf88f0d
357 changeset: 15:0aae7cf88f0d
357 branch: stable
358 branch: stable
358 tag: tip
359 tag: tip
359 user: test
360 user: test
360 date: Thu Jan 01 00:00:00 1970 +0000
361 date: Thu Jan 01 00:00:00 1970 +0000
361 summary: another change for branch stable
362 summary: another change for branch stable
362
363
363
364
364 $ rm -r ua
365 $ rm -r ua
365
366
366
367
367 Testing default checkout:
368 Testing default checkout:
368
369
369 $ hg clone a ua
370 $ hg clone a ua
370 updating to branch default
371 updating to branch default
371 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
372 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
372
373
373 Repo ua has both heads:
374 Repo ua has both heads:
374
375
375 $ hg -R ua heads
376 $ hg -R ua heads
376 changeset: 15:0aae7cf88f0d
377 changeset: 15:0aae7cf88f0d
377 branch: stable
378 branch: stable
378 tag: tip
379 tag: tip
379 user: test
380 user: test
380 date: Thu Jan 01 00:00:00 1970 +0000
381 date: Thu Jan 01 00:00:00 1970 +0000
381 summary: another change for branch stable
382 summary: another change for branch stable
382
383
383 changeset: 12:f21241060d6a
384 changeset: 12:f21241060d6a
384 user: test
385 user: test
385 date: Thu Jan 01 00:00:00 1970 +0000
386 date: Thu Jan 01 00:00:00 1970 +0000
386 summary: hacked default
387 summary: hacked default
387
388
388
389
389 Branch 'default' is checked out:
390 Branch 'default' is checked out:
390
391
391 $ hg -R ua parents
392 $ hg -R ua parents
392 changeset: 12:f21241060d6a
393 changeset: 12:f21241060d6a
393 user: test
394 user: test
394 date: Thu Jan 01 00:00:00 1970 +0000
395 date: Thu Jan 01 00:00:00 1970 +0000
395 summary: hacked default
396 summary: hacked default
396
397
397 Test clone with a branch named "@" (issue3677)
398 Test clone with a branch named "@" (issue3677)
398
399
399 $ hg -R ua branch @
400 $ hg -R ua branch @
400 marked working directory as branch @
401 marked working directory as branch @
401 $ hg -R ua commit -m 'created branch @'
402 $ hg -R ua commit -m 'created branch @'
402 $ hg clone ua atbranch
403 $ hg clone ua atbranch
403 updating to branch default
404 updating to branch default
404 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
405 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
405 $ hg -R atbranch heads
406 $ hg -R atbranch heads
406 changeset: 16:798b6d97153e
407 changeset: 16:798b6d97153e
407 branch: @
408 branch: @
408 tag: tip
409 tag: tip
409 parent: 12:f21241060d6a
410 parent: 12:f21241060d6a
410 user: test
411 user: test
411 date: Thu Jan 01 00:00:00 1970 +0000
412 date: Thu Jan 01 00:00:00 1970 +0000
412 summary: created branch @
413 summary: created branch @
413
414
414 changeset: 15:0aae7cf88f0d
415 changeset: 15:0aae7cf88f0d
415 branch: stable
416 branch: stable
416 user: test
417 user: test
417 date: Thu Jan 01 00:00:00 1970 +0000
418 date: Thu Jan 01 00:00:00 1970 +0000
418 summary: another change for branch stable
419 summary: another change for branch stable
419
420
420 changeset: 12:f21241060d6a
421 changeset: 12:f21241060d6a
421 user: test
422 user: test
422 date: Thu Jan 01 00:00:00 1970 +0000
423 date: Thu Jan 01 00:00:00 1970 +0000
423 summary: hacked default
424 summary: hacked default
424
425
425 $ hg -R atbranch parents
426 $ hg -R atbranch parents
426 changeset: 12:f21241060d6a
427 changeset: 12:f21241060d6a
427 user: test
428 user: test
428 date: Thu Jan 01 00:00:00 1970 +0000
429 date: Thu Jan 01 00:00:00 1970 +0000
429 summary: hacked default
430 summary: hacked default
430
431
431
432
432 $ rm -r ua atbranch
433 $ rm -r ua atbranch
433
434
434
435
435 Testing #<branch>:
436 Testing #<branch>:
436
437
437 $ hg clone -u . a#stable ua
438 $ hg clone -u . a#stable ua
438 adding changesets
439 adding changesets
439 adding manifests
440 adding manifests
440 adding file changes
441 adding file changes
441 added 14 changesets with 14 changes to 3 files
442 added 14 changesets with 14 changes to 3 files
442 new changesets acb14030fe0a:0aae7cf88f0d
443 new changesets acb14030fe0a:0aae7cf88f0d
443 updating to branch stable
444 updating to branch stable
444 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
445 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
445
446
446 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
447 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
447
448
448 $ hg -R ua heads
449 $ hg -R ua heads
449 changeset: 13:0aae7cf88f0d
450 changeset: 13:0aae7cf88f0d
450 branch: stable
451 branch: stable
451 tag: tip
452 tag: tip
452 user: test
453 user: test
453 date: Thu Jan 01 00:00:00 1970 +0000
454 date: Thu Jan 01 00:00:00 1970 +0000
454 summary: another change for branch stable
455 summary: another change for branch stable
455
456
456 changeset: 10:a7949464abda
457 changeset: 10:a7949464abda
457 user: test
458 user: test
458 date: Thu Jan 01 00:00:00 1970 +0000
459 date: Thu Jan 01 00:00:00 1970 +0000
459 summary: test
460 summary: test
460
461
461
462
462 Same revision checked out in repo a and ua:
463 Same revision checked out in repo a and ua:
463
464
464 $ hg -R a parents --template "{node|short}\n"
465 $ hg -R a parents --template "{node|short}\n"
465 e8ece76546a6
466 e8ece76546a6
466 $ hg -R ua parents --template "{node|short}\n"
467 $ hg -R ua parents --template "{node|short}\n"
467 e8ece76546a6
468 e8ece76546a6
468
469
469 $ rm -r ua
470 $ rm -r ua
470
471
471
472
472 Testing -u -r <branch>:
473 Testing -u -r <branch>:
473
474
474 $ hg clone -u . -r stable a ua
475 $ hg clone -u . -r stable a ua
475 adding changesets
476 adding changesets
476 adding manifests
477 adding manifests
477 adding file changes
478 adding file changes
478 added 14 changesets with 14 changes to 3 files
479 added 14 changesets with 14 changes to 3 files
479 new changesets acb14030fe0a:0aae7cf88f0d
480 new changesets acb14030fe0a:0aae7cf88f0d
480 updating to branch stable
481 updating to branch stable
481 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
482 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
482
483
483 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
484 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
484
485
485 $ hg -R ua heads
486 $ hg -R ua heads
486 changeset: 13:0aae7cf88f0d
487 changeset: 13:0aae7cf88f0d
487 branch: stable
488 branch: stable
488 tag: tip
489 tag: tip
489 user: test
490 user: test
490 date: Thu Jan 01 00:00:00 1970 +0000
491 date: Thu Jan 01 00:00:00 1970 +0000
491 summary: another change for branch stable
492 summary: another change for branch stable
492
493
493 changeset: 10:a7949464abda
494 changeset: 10:a7949464abda
494 user: test
495 user: test
495 date: Thu Jan 01 00:00:00 1970 +0000
496 date: Thu Jan 01 00:00:00 1970 +0000
496 summary: test
497 summary: test
497
498
498
499
499 Same revision checked out in repo a and ua:
500 Same revision checked out in repo a and ua:
500
501
501 $ hg -R a parents --template "{node|short}\n"
502 $ hg -R a parents --template "{node|short}\n"
502 e8ece76546a6
503 e8ece76546a6
503 $ hg -R ua parents --template "{node|short}\n"
504 $ hg -R ua parents --template "{node|short}\n"
504 e8ece76546a6
505 e8ece76546a6
505
506
506 $ rm -r ua
507 $ rm -r ua
507
508
508
509
509 Testing -r <branch>:
510 Testing -r <branch>:
510
511
511 $ hg clone -r stable a ua
512 $ hg clone -r stable a ua
512 adding changesets
513 adding changesets
513 adding manifests
514 adding manifests
514 adding file changes
515 adding file changes
515 added 14 changesets with 14 changes to 3 files
516 added 14 changesets with 14 changes to 3 files
516 new changesets acb14030fe0a:0aae7cf88f0d
517 new changesets acb14030fe0a:0aae7cf88f0d
517 updating to branch stable
518 updating to branch stable
518 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
519 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
519
520
520 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
521 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
521
522
522 $ hg -R ua heads
523 $ hg -R ua heads
523 changeset: 13:0aae7cf88f0d
524 changeset: 13:0aae7cf88f0d
524 branch: stable
525 branch: stable
525 tag: tip
526 tag: tip
526 user: test
527 user: test
527 date: Thu Jan 01 00:00:00 1970 +0000
528 date: Thu Jan 01 00:00:00 1970 +0000
528 summary: another change for branch stable
529 summary: another change for branch stable
529
530
530 changeset: 10:a7949464abda
531 changeset: 10:a7949464abda
531 user: test
532 user: test
532 date: Thu Jan 01 00:00:00 1970 +0000
533 date: Thu Jan 01 00:00:00 1970 +0000
533 summary: test
534 summary: test
534
535
535
536
536 Branch 'stable' is checked out:
537 Branch 'stable' is checked out:
537
538
538 $ hg -R ua parents
539 $ hg -R ua parents
539 changeset: 13:0aae7cf88f0d
540 changeset: 13:0aae7cf88f0d
540 branch: stable
541 branch: stable
541 tag: tip
542 tag: tip
542 user: test
543 user: test
543 date: Thu Jan 01 00:00:00 1970 +0000
544 date: Thu Jan 01 00:00:00 1970 +0000
544 summary: another change for branch stable
545 summary: another change for branch stable
545
546
546
547
547 $ rm -r ua
548 $ rm -r ua
548
549
549
550
550 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
551 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
551 iterable in addbranchrevs()
552 iterable in addbranchrevs()
552
553
553 $ cat <<EOF > simpleclone.py
554 $ cat <<EOF > simpleclone.py
554 > from mercurial import hg, ui as uimod
555 > from mercurial import hg, ui as uimod
555 > myui = uimod.ui.load()
556 > myui = uimod.ui.load()
556 > repo = hg.repository(myui, b'a')
557 > repo = hg.repository(myui, b'a')
557 > hg.clone(myui, {}, repo, dest=b"ua")
558 > hg.clone(myui, {}, repo, dest=b"ua")
558 > EOF
559 > EOF
559
560
560 $ "$PYTHON" simpleclone.py
561 $ "$PYTHON" simpleclone.py
561 updating to branch default
562 updating to branch default
562 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
563 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
563
564
564 $ rm -r ua
565 $ rm -r ua
565
566
566 $ cat <<EOF > branchclone.py
567 $ cat <<EOF > branchclone.py
567 > from mercurial import extensions, hg, ui as uimod
568 > from mercurial import extensions, hg, ui as uimod
568 > myui = uimod.ui.load()
569 > myui = uimod.ui.load()
569 > extensions.loadall(myui)
570 > extensions.loadall(myui)
570 > extensions.populateui(myui)
571 > extensions.populateui(myui)
571 > repo = hg.repository(myui, b'a')
572 > repo = hg.repository(myui, b'a')
572 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"])
573 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"])
573 > EOF
574 > EOF
574
575
575 $ "$PYTHON" branchclone.py
576 $ "$PYTHON" branchclone.py
576 adding changesets
577 adding changesets
577 adding manifests
578 adding manifests
578 adding file changes
579 adding file changes
579 added 14 changesets with 14 changes to 3 files
580 added 14 changesets with 14 changes to 3 files
580 new changesets acb14030fe0a:0aae7cf88f0d
581 new changesets acb14030fe0a:0aae7cf88f0d
581 updating to branch stable
582 updating to branch stable
582 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
583 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
583 $ rm -r ua
584 $ rm -r ua
584
585
585
586
586 Test clone with special '@' bookmark:
587 Test clone with special '@' bookmark:
587 $ cd a
588 $ cd a
588 $ hg bookmark -r a7949464abda @ # branch point of stable from default
589 $ hg bookmark -r a7949464abda @ # branch point of stable from default
589 $ hg clone . ../i
590 $ hg clone . ../i
590 updating to bookmark @
591 updating to bookmark @
591 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
592 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
592 $ hg id -i ../i
593 $ hg id -i ../i
593 a7949464abda
594 a7949464abda
594 $ rm -r ../i
595 $ rm -r ../i
595
596
596 $ hg bookmark -f -r stable @
597 $ hg bookmark -f -r stable @
597 $ hg bookmarks
598 $ hg bookmarks
598 @ 15:0aae7cf88f0d
599 @ 15:0aae7cf88f0d
599 $ hg clone . ../i
600 $ hg clone . ../i
600 updating to bookmark @ on branch stable
601 updating to bookmark @ on branch stable
601 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
602 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
602 $ hg id -i ../i
603 $ hg id -i ../i
603 0aae7cf88f0d
604 0aae7cf88f0d
604 $ cd "$TESTTMP"
605 $ cd "$TESTTMP"
605
606
606
607
607 Testing failures:
608 Testing failures:
608
609
609 $ mkdir fail
610 $ mkdir fail
610 $ cd fail
611 $ cd fail
611
612
612 No local source
613 No local source
613
614
614 $ hg clone a b
615 $ hg clone a b
615 abort: repository a not found!
616 abort: repository a not found!
616 [255]
617 [255]
617
618
618 No remote source
619 No remote source
619
620
620 #if windows
621 #if windows
621 $ hg clone http://$LOCALIP:3121/a b
622 $ hg clone http://$LOCALIP:3121/a b
622 abort: error: * (glob)
623 abort: error: * (glob)
623 [255]
624 [255]
624 #else
625 #else
625 $ hg clone http://$LOCALIP:3121/a b
626 $ hg clone http://$LOCALIP:3121/a b
626 abort: error: *refused* (glob)
627 abort: error: *refused* (glob)
627 [255]
628 [255]
628 #endif
629 #endif
629 $ rm -rf b # work around bug with http clone
630 $ rm -rf b # work around bug with http clone
630
631
631
632
632 #if unix-permissions no-root
633 #if unix-permissions no-root
633
634
634 Inaccessible source
635 Inaccessible source
635
636
636 $ mkdir a
637 $ mkdir a
637 $ chmod 000 a
638 $ chmod 000 a
638 $ hg clone a b
639 $ hg clone a b
639 abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
640 abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
640 [255]
641 [255]
641
642
642 Inaccessible destination
643 Inaccessible destination
643
644
644 $ hg init b
645 $ hg init b
645 $ cd b
646 $ cd b
646 $ hg clone . ../a
647 $ hg clone . ../a
647 abort: Permission denied: *../a* (glob)
648 abort: Permission denied: *../a* (glob)
648 [255]
649 [255]
649 $ cd ..
650 $ cd ..
650 $ chmod 700 a
651 $ chmod 700 a
651 $ rm -r a b
652 $ rm -r a b
652
653
653 #endif
654 #endif
654
655
655
656
656 #if fifo
657 #if fifo
657
658
658 Source of wrong type
659 Source of wrong type
659
660
660 $ mkfifo a
661 $ mkfifo a
661 $ hg clone a b
662 $ hg clone a b
662 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
663 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
663 [255]
664 [255]
664 $ rm a
665 $ rm a
665
666
666 #endif
667 #endif
667
668
668 Default destination, same directory
669 Default destination, same directory
669
670
670 $ hg init q
671 $ hg init q
671 $ hg clone q
672 $ hg clone q
672 destination directory: q
673 destination directory: q
673 abort: destination 'q' is not empty
674 abort: destination 'q' is not empty
674 [255]
675 [255]
675
676
676 destination directory not empty
677 destination directory not empty
677
678
678 $ mkdir a
679 $ mkdir a
679 $ echo stuff > a/a
680 $ echo stuff > a/a
680 $ hg clone q a
681 $ hg clone q a
681 abort: destination 'a' is not empty
682 abort: destination 'a' is not empty
682 [255]
683 [255]
683
684
684
685
685 #if unix-permissions no-root
686 #if unix-permissions no-root
686
687
687 leave existing directory in place after clone failure
688 leave existing directory in place after clone failure
688
689
689 $ hg init c
690 $ hg init c
690 $ cd c
691 $ cd c
691 $ echo c > c
692 $ echo c > c
692 $ hg commit -A -m test
693 $ hg commit -A -m test
693 adding c
694 adding c
694 $ chmod -rx .hg/store/data
695 $ chmod -rx .hg/store/data
695 $ cd ..
696 $ cd ..
696 $ mkdir d
697 $ mkdir d
697 $ hg clone c d 2> err
698 $ hg clone c d 2> err
698 [255]
699 [255]
699 $ test -d d
700 $ test -d d
700 $ test -d d/.hg
701 $ test -d d/.hg
701 [1]
702 [1]
702
703
703 re-enable perm to allow deletion
704 re-enable perm to allow deletion
704
705
705 $ chmod +rx c/.hg/store/data
706 $ chmod +rx c/.hg/store/data
706
707
707 #endif
708 #endif
708
709
709 $ cd ..
710 $ cd ..
710
711
711 Test clone from the repository in (emulated) revlog format 0 (issue4203):
712 Test clone from the repository in (emulated) revlog format 0 (issue4203):
712
713
713 $ mkdir issue4203
714 $ mkdir issue4203
714 $ mkdir -p src/.hg
715 $ mkdir -p src/.hg
715 $ echo foo > src/foo
716 $ echo foo > src/foo
716 $ hg -R src add src/foo
717 $ hg -R src add src/foo
717 $ hg -R src commit -m '#0'
718 $ hg -R src commit -m '#0'
718 $ hg -R src log -q
719 $ hg -R src log -q
719 0:e1bab28bca43
720 0:e1bab28bca43
720 $ hg -R src debugrevlog -c | egrep 'format|flags'
721 $ hg -R src debugrevlog -c | egrep 'format|flags'
721 format : 0
722 format : 0
722 flags : (none)
723 flags : (none)
723 $ hg clone -U -q src dst
724 $ hg clone -U -q src dst
724 $ hg -R dst log -q
725 $ hg -R dst log -q
725 0:e1bab28bca43
726 0:e1bab28bca43
726
727
727 Create repositories to test auto sharing functionality
728 Create repositories to test auto sharing functionality
728
729
729 $ cat >> $HGRCPATH << EOF
730 $ cat >> $HGRCPATH << EOF
730 > [extensions]
731 > [extensions]
731 > share=
732 > share=
732 > EOF
733 > EOF
733
734
734 $ hg init empty
735 $ hg init empty
735 $ hg init source1a
736 $ hg init source1a
736 $ cd source1a
737 $ cd source1a
737 $ echo initial1 > foo
738 $ echo initial1 > foo
738 $ hg -q commit -A -m initial
739 $ hg -q commit -A -m initial
739 $ echo second > foo
740 $ echo second > foo
740 $ hg commit -m second
741 $ hg commit -m second
741 $ cd ..
742 $ cd ..
742
743
743 $ hg init filteredrev0
744 $ hg init filteredrev0
744 $ cd filteredrev0
745 $ cd filteredrev0
745 $ cat >> .hg/hgrc << EOF
746 $ cat >> .hg/hgrc << EOF
746 > [experimental]
747 > [experimental]
747 > evolution.createmarkers=True
748 > evolution.createmarkers=True
748 > EOF
749 > EOF
749 $ echo initial1 > foo
750 $ echo initial1 > foo
750 $ hg -q commit -A -m initial0
751 $ hg -q commit -A -m initial0
751 $ hg -q up -r null
752 $ hg -q up -r null
752 $ echo initial2 > foo
753 $ echo initial2 > foo
753 $ hg -q commit -A -m initial1
754 $ hg -q commit -A -m initial1
754 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
755 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
755 obsoleted 1 changesets
756 obsoleted 1 changesets
756 $ cd ..
757 $ cd ..
757
758
758 $ hg -q clone --pull source1a source1b
759 $ hg -q clone --pull source1a source1b
759 $ cd source1a
760 $ cd source1a
760 $ hg bookmark bookA
761 $ hg bookmark bookA
761 $ echo 1a > foo
762 $ echo 1a > foo
762 $ hg commit -m 1a
763 $ hg commit -m 1a
763 $ cd ../source1b
764 $ cd ../source1b
764 $ hg -q up -r 0
765 $ hg -q up -r 0
765 $ echo head1 > foo
766 $ echo head1 > foo
766 $ hg commit -m head1
767 $ hg commit -m head1
767 created new head
768 created new head
768 $ hg bookmark head1
769 $ hg bookmark head1
769 $ hg -q up -r 0
770 $ hg -q up -r 0
770 $ echo head2 > foo
771 $ echo head2 > foo
771 $ hg commit -m head2
772 $ hg commit -m head2
772 created new head
773 created new head
773 $ hg bookmark head2
774 $ hg bookmark head2
774 $ hg -q up -r 0
775 $ hg -q up -r 0
775 $ hg branch branch1
776 $ hg branch branch1
776 marked working directory as branch branch1
777 marked working directory as branch branch1
777 (branches are permanent and global, did you want a bookmark?)
778 (branches are permanent and global, did you want a bookmark?)
778 $ echo branch1 > foo
779 $ echo branch1 > foo
779 $ hg commit -m branch1
780 $ hg commit -m branch1
780 $ hg -q up -r 0
781 $ hg -q up -r 0
781 $ hg branch branch2
782 $ hg branch branch2
782 marked working directory as branch branch2
783 marked working directory as branch branch2
783 $ echo branch2 > foo
784 $ echo branch2 > foo
784 $ hg commit -m branch2
785 $ hg commit -m branch2
785 $ cd ..
786 $ cd ..
786 $ hg init source2
787 $ hg init source2
787 $ cd source2
788 $ cd source2
788 $ echo initial2 > foo
789 $ echo initial2 > foo
789 $ hg -q commit -A -m initial2
790 $ hg -q commit -A -m initial2
790 $ echo second > foo
791 $ echo second > foo
791 $ hg commit -m second
792 $ hg commit -m second
792 $ cd ..
793 $ cd ..
793
794
794 Clone with auto share from an empty repo should not result in share
795 Clone with auto share from an empty repo should not result in share
795
796
796 $ mkdir share
797 $ mkdir share
797 $ hg --config share.pool=share clone empty share-empty
798 $ hg --config share.pool=share clone empty share-empty
798 (not using pooled storage: remote appears to be empty)
799 (not using pooled storage: remote appears to be empty)
799 updating to branch default
800 updating to branch default
800 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
801 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
801 $ ls share
802 $ ls share
802 $ test -d share-empty/.hg/store
803 $ test -d share-empty/.hg/store
803 $ test -f share-empty/.hg/sharedpath
804 $ test -f share-empty/.hg/sharedpath
804 [1]
805 [1]
805
806
806 Clone with auto share from a repo with filtered revision 0 should not result in share
807 Clone with auto share from a repo with filtered revision 0 should not result in share
807
808
808 $ hg --config share.pool=share clone filteredrev0 share-filtered
809 $ hg --config share.pool=share clone filteredrev0 share-filtered
809 (not using pooled storage: unable to resolve identity of remote)
810 (not using pooled storage: unable to resolve identity of remote)
810 requesting all changes
811 requesting all changes
811 adding changesets
812 adding changesets
812 adding manifests
813 adding manifests
813 adding file changes
814 adding file changes
814 added 1 changesets with 1 changes to 1 files
815 added 1 changesets with 1 changes to 1 files
815 new changesets e082c1832e09
816 new changesets e082c1832e09
816 updating to branch default
817 updating to branch default
817 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
818 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
818
819
819 Clone from repo with content should result in shared store being created
820 Clone from repo with content should result in shared store being created
820
821
821 $ hg --config share.pool=share clone source1a share-dest1a
822 $ hg --config share.pool=share clone source1a share-dest1a
822 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
823 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
823 requesting all changes
824 requesting all changes
824 adding changesets
825 adding changesets
825 adding manifests
826 adding manifests
826 adding file changes
827 adding file changes
827 added 3 changesets with 3 changes to 1 files
828 added 3 changesets with 3 changes to 1 files
828 new changesets b5f04eac9d8f:e5bfe23c0b47
829 new changesets b5f04eac9d8f:e5bfe23c0b47
829 searching for changes
830 searching for changes
830 no changes found
831 no changes found
831 adding remote bookmark bookA
832 adding remote bookmark bookA
832 updating working directory
833 updating working directory
833 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
834 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
834
835
835 The shared repo should have been created
836 The shared repo should have been created
836
837
837 $ ls share
838 $ ls share
838 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
839 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
839
840
840 The destination should point to it
841 The destination should point to it
841
842
842 $ cat share-dest1a/.hg/sharedpath; echo
843 $ cat share-dest1a/.hg/sharedpath; echo
843 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
844 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
844
845
845 The destination should have bookmarks
846 The destination should have bookmarks
846
847
847 $ hg -R share-dest1a bookmarks
848 $ hg -R share-dest1a bookmarks
848 bookA 2:e5bfe23c0b47
849 bookA 2:e5bfe23c0b47
849
850
850 The default path should be the remote, not the share
851 The default path should be the remote, not the share
851
852
852 $ hg -R share-dest1a config paths.default
853 $ hg -R share-dest1a config paths.default
853 $TESTTMP/source1a
854 $TESTTMP/source1a
854
855
855 Clone with existing share dir should result in pull + share
856 Clone with existing share dir should result in pull + share
856
857
857 $ hg --config share.pool=share clone source1b share-dest1b
858 $ hg --config share.pool=share clone source1b share-dest1b
858 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
859 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
859 searching for changes
860 searching for changes
860 adding changesets
861 adding changesets
861 adding manifests
862 adding manifests
862 adding file changes
863 adding file changes
863 added 4 changesets with 4 changes to 1 files (+4 heads)
864 added 4 changesets with 4 changes to 1 files (+4 heads)
864 adding remote bookmark head1
865 adding remote bookmark head1
865 adding remote bookmark head2
866 adding remote bookmark head2
866 new changesets 4a8dc1ab4c13:6bacf4683960
867 new changesets 4a8dc1ab4c13:6bacf4683960
867 updating working directory
868 updating working directory
868 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
869 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
869
870
870 $ ls share
871 $ ls share
871 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
872 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
872
873
873 $ cat share-dest1b/.hg/sharedpath; echo
874 $ cat share-dest1b/.hg/sharedpath; echo
874 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
875 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
875
876
876 We only get bookmarks from the remote, not everything in the share
877 We only get bookmarks from the remote, not everything in the share
877
878
878 $ hg -R share-dest1b bookmarks
879 $ hg -R share-dest1b bookmarks
879 head1 3:4a8dc1ab4c13
880 head1 3:4a8dc1ab4c13
880 head2 4:99f71071f117
881 head2 4:99f71071f117
881
882
882 Default path should be source, not share.
883 Default path should be source, not share.
883
884
884 $ hg -R share-dest1b config paths.default
885 $ hg -R share-dest1b config paths.default
885 $TESTTMP/source1b
886 $TESTTMP/source1b
886
887
887 Checked out revision should be head of default branch
888 Checked out revision should be head of default branch
888
889
889 $ hg -R share-dest1b log -r .
890 $ hg -R share-dest1b log -r .
890 changeset: 4:99f71071f117
891 changeset: 4:99f71071f117
891 bookmark: head2
892 bookmark: head2
892 parent: 0:b5f04eac9d8f
893 parent: 0:b5f04eac9d8f
893 user: test
894 user: test
894 date: Thu Jan 01 00:00:00 1970 +0000
895 date: Thu Jan 01 00:00:00 1970 +0000
895 summary: head2
896 summary: head2
896
897
897
898
898 Clone from unrelated repo should result in new share
899 Clone from unrelated repo should result in new share
899
900
900 $ hg --config share.pool=share clone source2 share-dest2
901 $ hg --config share.pool=share clone source2 share-dest2
901 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
902 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
902 requesting all changes
903 requesting all changes
903 adding changesets
904 adding changesets
904 adding manifests
905 adding manifests
905 adding file changes
906 adding file changes
906 added 2 changesets with 2 changes to 1 files
907 added 2 changesets with 2 changes to 1 files
907 new changesets 22aeff664783:63cf6c3dba4a
908 new changesets 22aeff664783:63cf6c3dba4a
908 searching for changes
909 searching for changes
909 no changes found
910 no changes found
910 updating working directory
911 updating working directory
911 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
912 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
912
913
913 $ ls share
914 $ ls share
914 22aeff664783fd44c6d9b435618173c118c3448e
915 22aeff664783fd44c6d9b435618173c118c3448e
915 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
916 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
916
917
917 remote naming mode works as advertised
918 remote naming mode works as advertised
918
919
919 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
920 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
920 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
921 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
921 requesting all changes
922 requesting all changes
922 adding changesets
923 adding changesets
923 adding manifests
924 adding manifests
924 adding file changes
925 adding file changes
925 added 3 changesets with 3 changes to 1 files
926 added 3 changesets with 3 changes to 1 files
926 new changesets b5f04eac9d8f:e5bfe23c0b47
927 new changesets b5f04eac9d8f:e5bfe23c0b47
927 searching for changes
928 searching for changes
928 no changes found
929 no changes found
929 adding remote bookmark bookA
930 adding remote bookmark bookA
930 updating working directory
931 updating working directory
931 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
932 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
932
933
933 $ ls shareremote
934 $ ls shareremote
934 195bb1fcdb595c14a6c13e0269129ed78f6debde
935 195bb1fcdb595c14a6c13e0269129ed78f6debde
935
936
936 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
937 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
937 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
938 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
938 requesting all changes
939 requesting all changes
939 adding changesets
940 adding changesets
940 adding manifests
941 adding manifests
941 adding file changes
942 adding file changes
942 added 6 changesets with 6 changes to 1 files (+4 heads)
943 added 6 changesets with 6 changes to 1 files (+4 heads)
943 new changesets b5f04eac9d8f:6bacf4683960
944 new changesets b5f04eac9d8f:6bacf4683960
944 searching for changes
945 searching for changes
945 no changes found
946 no changes found
946 adding remote bookmark head1
947 adding remote bookmark head1
947 adding remote bookmark head2
948 adding remote bookmark head2
948 updating working directory
949 updating working directory
949 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
950 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
950
951
951 $ ls shareremote
952 $ ls shareremote
952 195bb1fcdb595c14a6c13e0269129ed78f6debde
953 195bb1fcdb595c14a6c13e0269129ed78f6debde
953 c0d4f83847ca2a873741feb7048a45085fd47c46
954 c0d4f83847ca2a873741feb7048a45085fd47c46
954
955
955 request to clone a single revision is respected in sharing mode
956 request to clone a single revision is respected in sharing mode
956
957
957 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
958 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
958 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
959 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
959 adding changesets
960 adding changesets
960 adding manifests
961 adding manifests
961 adding file changes
962 adding file changes
962 added 2 changesets with 2 changes to 1 files
963 added 2 changesets with 2 changes to 1 files
963 new changesets b5f04eac9d8f:4a8dc1ab4c13
964 new changesets b5f04eac9d8f:4a8dc1ab4c13
964 no changes found
965 no changes found
965 adding remote bookmark head1
966 adding remote bookmark head1
966 updating working directory
967 updating working directory
967 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
968 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
968
969
969 $ hg -R share-1arev log -G
970 $ hg -R share-1arev log -G
970 @ changeset: 1:4a8dc1ab4c13
971 @ changeset: 1:4a8dc1ab4c13
971 | bookmark: head1
972 | bookmark: head1
972 | tag: tip
973 | tag: tip
973 | user: test
974 | user: test
974 | date: Thu Jan 01 00:00:00 1970 +0000
975 | date: Thu Jan 01 00:00:00 1970 +0000
975 | summary: head1
976 | summary: head1
976 |
977 |
977 o changeset: 0:b5f04eac9d8f
978 o changeset: 0:b5f04eac9d8f
978 user: test
979 user: test
979 date: Thu Jan 01 00:00:00 1970 +0000
980 date: Thu Jan 01 00:00:00 1970 +0000
980 summary: initial
981 summary: initial
981
982
982
983
983 making another clone should only pull down requested rev
984 making another clone should only pull down requested rev
984
985
985 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
986 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
986 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
987 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
987 searching for changes
988 searching for changes
988 adding changesets
989 adding changesets
989 adding manifests
990 adding manifests
990 adding file changes
991 adding file changes
991 added 1 changesets with 1 changes to 1 files (+1 heads)
992 added 1 changesets with 1 changes to 1 files (+1 heads)
992 adding remote bookmark head1
993 adding remote bookmark head1
993 adding remote bookmark head2
994 adding remote bookmark head2
994 new changesets 99f71071f117
995 new changesets 99f71071f117
995 updating working directory
996 updating working directory
996 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
997 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
997
998
998 $ hg -R share-1brev log -G
999 $ hg -R share-1brev log -G
999 @ changeset: 2:99f71071f117
1000 @ changeset: 2:99f71071f117
1000 | bookmark: head2
1001 | bookmark: head2
1001 | tag: tip
1002 | tag: tip
1002 | parent: 0:b5f04eac9d8f
1003 | parent: 0:b5f04eac9d8f
1003 | user: test
1004 | user: test
1004 | date: Thu Jan 01 00:00:00 1970 +0000
1005 | date: Thu Jan 01 00:00:00 1970 +0000
1005 | summary: head2
1006 | summary: head2
1006 |
1007 |
1007 | o changeset: 1:4a8dc1ab4c13
1008 | o changeset: 1:4a8dc1ab4c13
1008 |/ bookmark: head1
1009 |/ bookmark: head1
1009 | user: test
1010 | user: test
1010 | date: Thu Jan 01 00:00:00 1970 +0000
1011 | date: Thu Jan 01 00:00:00 1970 +0000
1011 | summary: head1
1012 | summary: head1
1012 |
1013 |
1013 o changeset: 0:b5f04eac9d8f
1014 o changeset: 0:b5f04eac9d8f
1014 user: test
1015 user: test
1015 date: Thu Jan 01 00:00:00 1970 +0000
1016 date: Thu Jan 01 00:00:00 1970 +0000
1016 summary: initial
1017 summary: initial
1017
1018
1018
1019
1019 Request to clone a single branch is respected in sharing mode
1020 Request to clone a single branch is respected in sharing mode
1020
1021
1021 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1022 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1022 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1023 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1023 adding changesets
1024 adding changesets
1024 adding manifests
1025 adding manifests
1025 adding file changes
1026 adding file changes
1026 added 2 changesets with 2 changes to 1 files
1027 added 2 changesets with 2 changes to 1 files
1027 new changesets b5f04eac9d8f:5f92a6c1a1b1
1028 new changesets b5f04eac9d8f:5f92a6c1a1b1
1028 no changes found
1029 no changes found
1029 updating working directory
1030 updating working directory
1030 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1031 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1031
1032
1032 $ hg -R share-1bbranch1 log -G
1033 $ hg -R share-1bbranch1 log -G
1033 o changeset: 1:5f92a6c1a1b1
1034 o changeset: 1:5f92a6c1a1b1
1034 | branch: branch1
1035 | branch: branch1
1035 | tag: tip
1036 | tag: tip
1036 | user: test
1037 | user: test
1037 | date: Thu Jan 01 00:00:00 1970 +0000
1038 | date: Thu Jan 01 00:00:00 1970 +0000
1038 | summary: branch1
1039 | summary: branch1
1039 |
1040 |
1040 @ changeset: 0:b5f04eac9d8f
1041 @ changeset: 0:b5f04eac9d8f
1041 user: test
1042 user: test
1042 date: Thu Jan 01 00:00:00 1970 +0000
1043 date: Thu Jan 01 00:00:00 1970 +0000
1043 summary: initial
1044 summary: initial
1044
1045
1045
1046
1046 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1047 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1047 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1048 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1048 searching for changes
1049 searching for changes
1049 adding changesets
1050 adding changesets
1050 adding manifests
1051 adding manifests
1051 adding file changes
1052 adding file changes
1052 added 1 changesets with 1 changes to 1 files (+1 heads)
1053 added 1 changesets with 1 changes to 1 files (+1 heads)
1053 new changesets 6bacf4683960
1054 new changesets 6bacf4683960
1054 updating working directory
1055 updating working directory
1055 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1056 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1056
1057
1057 $ hg -R share-1bbranch2 log -G
1058 $ hg -R share-1bbranch2 log -G
1058 o changeset: 2:6bacf4683960
1059 o changeset: 2:6bacf4683960
1059 | branch: branch2
1060 | branch: branch2
1060 | tag: tip
1061 | tag: tip
1061 | parent: 0:b5f04eac9d8f
1062 | parent: 0:b5f04eac9d8f
1062 | user: test
1063 | user: test
1063 | date: Thu Jan 01 00:00:00 1970 +0000
1064 | date: Thu Jan 01 00:00:00 1970 +0000
1064 | summary: branch2
1065 | summary: branch2
1065 |
1066 |
1066 | o changeset: 1:5f92a6c1a1b1
1067 | o changeset: 1:5f92a6c1a1b1
1067 |/ branch: branch1
1068 |/ branch: branch1
1068 | user: test
1069 | user: test
1069 | date: Thu Jan 01 00:00:00 1970 +0000
1070 | date: Thu Jan 01 00:00:00 1970 +0000
1070 | summary: branch1
1071 | summary: branch1
1071 |
1072 |
1072 @ changeset: 0:b5f04eac9d8f
1073 @ changeset: 0:b5f04eac9d8f
1073 user: test
1074 user: test
1074 date: Thu Jan 01 00:00:00 1970 +0000
1075 date: Thu Jan 01 00:00:00 1970 +0000
1075 summary: initial
1076 summary: initial
1076
1077
1077
1078
1078 -U is respected in share clone mode
1079 -U is respected in share clone mode
1079
1080
1080 $ hg --config share.pool=share clone -U source1a share-1anowc
1081 $ hg --config share.pool=share clone -U source1a share-1anowc
1081 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1082 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1082 searching for changes
1083 searching for changes
1083 no changes found
1084 no changes found
1084 adding remote bookmark bookA
1085 adding remote bookmark bookA
1085
1086
1086 $ ls share-1anowc
1087 $ ls share-1anowc
1087
1088
1088 Test that auto sharing doesn't cause failure of "hg clone local remote"
1089 Test that auto sharing doesn't cause failure of "hg clone local remote"
1089
1090
1090 $ cd $TESTTMP
1091 $ cd $TESTTMP
1091 $ hg -R a id -r 0
1092 $ hg -R a id -r 0
1092 acb14030fe0a
1093 acb14030fe0a
1093 $ hg id -R remote -r 0
1094 $ hg id -R remote -r 0
1094 abort: repository remote not found!
1095 abort: repository remote not found!
1095 [255]
1096 [255]
1096 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1097 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1097 $ hg -R remote id -r 0
1098 $ hg -R remote id -r 0
1098 acb14030fe0a
1099 acb14030fe0a
1099
1100
1100 Cloning into pooled storage doesn't race (issue5104)
1101 Cloning into pooled storage doesn't race (issue5104)
1101
1102
1102 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1103 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1103 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1104 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1104 $ wait
1105 $ wait
1105
1106
1106 $ hg -R share-destrace1 log -r tip
1107 $ hg -R share-destrace1 log -r tip
1107 changeset: 2:e5bfe23c0b47
1108 changeset: 2:e5bfe23c0b47
1108 bookmark: bookA
1109 bookmark: bookA
1109 tag: tip
1110 tag: tip
1110 user: test
1111 user: test
1111 date: Thu Jan 01 00:00:00 1970 +0000
1112 date: Thu Jan 01 00:00:00 1970 +0000
1112 summary: 1a
1113 summary: 1a
1113
1114
1114
1115
1115 $ hg -R share-destrace2 log -r tip
1116 $ hg -R share-destrace2 log -r tip
1116 changeset: 2:e5bfe23c0b47
1117 changeset: 2:e5bfe23c0b47
1117 bookmark: bookA
1118 bookmark: bookA
1118 tag: tip
1119 tag: tip
1119 user: test
1120 user: test
1120 date: Thu Jan 01 00:00:00 1970 +0000
1121 date: Thu Jan 01 00:00:00 1970 +0000
1121 summary: 1a
1122 summary: 1a
1122
1123
1123 One repo should be new, the other should be shared from the pool. We
1124 One repo should be new, the other should be shared from the pool. We
1124 don't care which is which, so we just make sure we always print the
1125 don't care which is which, so we just make sure we always print the
1125 one containing "new pooled" first, then the one containing "existing
1126 one containing "new pooled" first, then the one containing "existing
1126 pooled".
1127 pooled".
1127
1128
1128 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1129 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1129 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1130 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1130 requesting all changes
1131 requesting all changes
1131 adding changesets
1132 adding changesets
1132 adding manifests
1133 adding manifests
1133 adding file changes
1134 adding file changes
1134 added 3 changesets with 3 changes to 1 files
1135 added 3 changesets with 3 changes to 1 files
1135 new changesets b5f04eac9d8f:e5bfe23c0b47
1136 new changesets b5f04eac9d8f:e5bfe23c0b47
1136 searching for changes
1137 searching for changes
1137 no changes found
1138 no changes found
1138 adding remote bookmark bookA
1139 adding remote bookmark bookA
1139 updating working directory
1140 updating working directory
1140 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1141 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1141
1142
1142 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1143 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1143 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1144 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1144 searching for changes
1145 searching for changes
1145 no changes found
1146 no changes found
1146 adding remote bookmark bookA
1147 adding remote bookmark bookA
1147 updating working directory
1148 updating working directory
1148 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1149 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1149
1150
1150 SEC: check for unsafe ssh url
1151 SEC: check for unsafe ssh url
1151
1152
1152 $ cat >> $HGRCPATH << EOF
1153 $ cat >> $HGRCPATH << EOF
1153 > [ui]
1154 > [ui]
1154 > ssh = sh -c "read l; read l; read l"
1155 > ssh = sh -c "read l; read l; read l"
1155 > EOF
1156 > EOF
1156
1157
1157 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1158 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1158 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1159 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1159 [255]
1160 [255]
1160 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1161 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1161 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1162 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1162 [255]
1163 [255]
1163 $ hg clone 'ssh://fakehost|touch%20owned/path'
1164 $ hg clone 'ssh://fakehost|touch%20owned/path'
1164 abort: no suitable response from remote hg!
1165 abort: no suitable response from remote hg!
1165 [255]
1166 [255]
1166 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1167 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1167 abort: no suitable response from remote hg!
1168 abort: no suitable response from remote hg!
1168 [255]
1169 [255]
1169
1170
1170 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1171 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1171 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1172 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1172 [255]
1173 [255]
1173
1174
1174 #if windows
1175 #if windows
1175 $ hg clone "ssh://%26touch%20owned%20/" --debug
1176 $ hg clone "ssh://%26touch%20owned%20/" --debug
1176 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1177 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1177 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1178 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1178 sending hello command
1179 sending hello command
1179 sending between command
1180 sending between command
1180 abort: no suitable response from remote hg!
1181 abort: no suitable response from remote hg!
1181 [255]
1182 [255]
1182 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1183 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1183 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1184 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1184 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1185 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1185 sending hello command
1186 sending hello command
1186 sending between command
1187 sending between command
1187 abort: no suitable response from remote hg!
1188 abort: no suitable response from remote hg!
1188 [255]
1189 [255]
1189 #else
1190 #else
1190 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1191 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1191 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1192 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1192 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1193 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1193 sending hello command
1194 sending hello command
1194 sending between command
1195 sending between command
1195 abort: no suitable response from remote hg!
1196 abort: no suitable response from remote hg!
1196 [255]
1197 [255]
1197 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1198 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1198 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1199 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1199 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1200 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1200 sending hello command
1201 sending hello command
1201 sending between command
1202 sending between command
1202 abort: no suitable response from remote hg!
1203 abort: no suitable response from remote hg!
1203 [255]
1204 [255]
1204 #endif
1205 #endif
1205
1206
1206 $ hg clone "ssh://v-alid.example.com/" --debug
1207 $ hg clone "ssh://v-alid.example.com/" --debug
1207 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1208 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1208 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1209 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1209 sending hello command
1210 sending hello command
1210 sending between command
1211 sending between command
1211 abort: no suitable response from remote hg!
1212 abort: no suitable response from remote hg!
1212 [255]
1213 [255]
1213
1214
1214 We should not have created a file named owned - if it exists, the
1215 We should not have created a file named owned - if it exists, the
1215 attack succeeded.
1216 attack succeeded.
1216 $ if test -f owned; then echo 'you got owned'; fi
1217 $ if test -f owned; then echo 'you got owned'; fi
1217
1218
1218 Cloning without fsmonitor enabled does not print a warning for small repos
1219 Cloning without fsmonitor enabled does not print a warning for small repos
1219
1220
1220 $ hg clone a fsmonitor-default
1221 $ hg clone a fsmonitor-default
1221 updating to bookmark @ on branch stable
1222 updating to bookmark @ on branch stable
1222 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1223 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1223
1224
1224 Lower the warning threshold to simulate a large repo
1225 Lower the warning threshold to simulate a large repo
1225
1226
1226 $ cat >> $HGRCPATH << EOF
1227 $ cat >> $HGRCPATH << EOF
1227 > [fsmonitor]
1228 > [fsmonitor]
1228 > warn_update_file_count = 2
1229 > warn_update_file_count = 2
1229 > EOF
1230 > EOF
1230
1231
1231 We should see a warning about no fsmonitor on supported platforms
1232 We should see a warning about no fsmonitor on supported platforms
1232
1233
1233 #if linuxormacos no-fsmonitor
1234 #if linuxormacos no-fsmonitor
1234 $ hg clone a nofsmonitor
1235 $ hg clone a nofsmonitor
1235 updating to bookmark @ on branch stable
1236 updating to bookmark @ on branch stable
1236 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1237 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1237 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1238 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1238 #else
1239 #else
1239 $ hg clone a nofsmonitor
1240 $ hg clone a nofsmonitor
1240 updating to bookmark @ on branch stable
1241 updating to bookmark @ on branch stable
1241 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1242 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1242 #endif
1243 #endif
1243
1244
1244 We should not see warning about fsmonitor when it is enabled
1245 We should not see warning about fsmonitor when it is enabled
1245
1246
1246 #if fsmonitor
1247 #if fsmonitor
1247 $ hg clone a fsmonitor-enabled
1248 $ hg clone a fsmonitor-enabled
1248 updating to bookmark @ on branch stable
1249 updating to bookmark @ on branch stable
1249 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1250 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1250 #endif
1251 #endif
1251
1252
1252 We can disable the fsmonitor warning
1253 We can disable the fsmonitor warning
1253
1254
1254 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1255 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1255 updating to bookmark @ on branch stable
1256 updating to bookmark @ on branch stable
1256 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1257 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1257
1258
1258 Loaded fsmonitor but disabled in config should still print warning
1259 Loaded fsmonitor but disabled in config should still print warning
1259
1260
1260 #if linuxormacos fsmonitor
1261 #if linuxormacos fsmonitor
1261 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1262 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1262 updating to bookmark @ on branch stable
1263 updating to bookmark @ on branch stable
1263 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1264 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1264 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1265 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1265 #endif
1266 #endif
1266
1267
1267 Warning not printed if working directory isn't empty
1268 Warning not printed if working directory isn't empty
1268
1269
1269 $ hg -q clone a fsmonitor-update
1270 $ hg -q clone a fsmonitor-update
1270 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1271 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1271 $ cd fsmonitor-update
1272 $ cd fsmonitor-update
1272 $ hg up acb14030fe0a
1273 $ hg up acb14030fe0a
1273 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1274 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1274 (leaving bookmark @)
1275 (leaving bookmark @)
1275 $ hg up cf0fe1914066
1276 $ hg up cf0fe1914066
1276 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1277 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1277
1278
1278 `hg update` from null revision also prints
1279 `hg update` from null revision also prints
1279
1280
1280 $ hg up null
1281 $ hg up null
1281 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1282 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1282
1283
1283 #if linuxormacos no-fsmonitor
1284 #if linuxormacos no-fsmonitor
1284 $ hg up cf0fe1914066
1285 $ hg up cf0fe1914066
1285 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1286 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1286 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1287 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1287 #else
1288 #else
1288 $ hg up cf0fe1914066
1289 $ hg up cf0fe1914066
1289 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1290 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1290 #endif
1291 #endif
1291
1292
1292 $ cd ..
1293 $ cd ..
1293
1294
@@ -1,432 +1,432 b''
1 #require hardlink reporevlogstore
1 #require hardlink reporevlogstore
2
2
3 $ cat > nlinks.py <<EOF
3 $ cat > nlinks.py <<EOF
4 > from __future__ import print_function
4 > from __future__ import print_function
5 > import sys
5 > import sys
6 > from mercurial import pycompat, util
6 > from mercurial import pycompat, util
7 > for f in sorted(sys.stdin.readlines()):
7 > for f in sorted(sys.stdin.readlines()):
8 > f = f[:-1]
8 > f = f[:-1]
9 > print(util.nlinks(pycompat.fsencode(f)), f)
9 > print(util.nlinks(pycompat.fsencode(f)), f)
10 > EOF
10 > EOF
11
11
12 $ nlinksdir()
12 $ nlinksdir()
13 > {
13 > {
14 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
14 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
15 > }
15 > }
16
16
17 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
17 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
18
18
19 $ cat > linkcp.py <<EOF
19 $ cat > linkcp.py <<EOF
20 > from __future__ import absolute_import
20 > from __future__ import absolute_import
21 > import sys
21 > import sys
22 > from mercurial import pycompat, util
22 > from mercurial import pycompat, util
23 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
23 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
24 > pycompat.fsencode(sys.argv[2]), hardlink=True)
24 > pycompat.fsencode(sys.argv[2]), hardlink=True)
25 > EOF
25 > EOF
26
26
27 $ linkcp()
27 $ linkcp()
28 > {
28 > {
29 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
29 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
30 > }
30 > }
31
31
32 Prepare repo r1:
32 Prepare repo r1:
33
33
34 $ hg init r1
34 $ hg init r1
35 $ cd r1
35 $ cd r1
36
36
37 $ echo c1 > f1
37 $ echo c1 > f1
38 $ hg add f1
38 $ hg add f1
39 $ hg ci -m0
39 $ hg ci -m0
40
40
41 $ mkdir d1
41 $ mkdir d1
42 $ cd d1
42 $ cd d1
43 $ echo c2 > f2
43 $ echo c2 > f2
44 $ hg add f2
44 $ hg add f2
45 $ hg ci -m1
45 $ hg ci -m1
46 $ cd ../..
46 $ cd ../..
47
47
48 $ nlinksdir r1/.hg/store
48 $ nlinksdir r1/.hg/store
49 1 r1/.hg/store/00changelog.i
49 1 r1/.hg/store/00changelog.i
50 1 r1/.hg/store/00manifest.i
50 1 r1/.hg/store/00manifest.i
51 1 r1/.hg/store/data/d1/f2.i
51 1 r1/.hg/store/data/d1/f2.i
52 1 r1/.hg/store/data/f1.i
52 1 r1/.hg/store/data/f1.i
53 1 r1/.hg/store/fncache (repofncache !)
53 1 r1/.hg/store/fncache (repofncache !)
54 1 r1/.hg/store/phaseroots
54 1 r1/.hg/store/phaseroots
55 1 r1/.hg/store/undo
55 1 r1/.hg/store/undo
56 1 r1/.hg/store/undo.backup.fncache (repofncache !)
56 1 r1/.hg/store/undo.backup.fncache (repofncache !)
57 1 r1/.hg/store/undo.backupfiles
57 1 r1/.hg/store/undo.backupfiles
58 1 r1/.hg/store/undo.phaseroots
58 1 r1/.hg/store/undo.phaseroots
59
59
60
60
61 Create hardlinked clone r2:
61 Create hardlinked clone r2:
62
62
63 $ hg clone -U --debug r1 r2 --config progress.debug=true
63 $ hg clone -U --debug r1 r2 --config progress.debug=true
64 linking: 1 files
64 linking: 1 files
65 linking: 2 files
65 linking: 2 files
66 linking: 3 files
66 linking: 3 files
67 linking: 4 files
67 linking: 4 files
68 linking: 5 files
68 linking: 5 files
69 linking: 6 files
69 linking: 6 files
70 linking: 7 files
70 linking: 7 files
71 linked 7 files
71 linked 7 files
72
72
73 Create non-hardlinked clone r3:
73 Create non-hardlinked clone r3:
74
74
75 $ hg clone --pull r1 r3
75 $ hg clone --pull r1 r3
76 requesting all changes
76 requesting all changes
77 adding changesets
77 adding changesets
78 adding manifests
78 adding manifests
79 adding file changes
79 adding file changes
80 added 2 changesets with 2 changes to 2 files
80 added 2 changesets with 2 changes to 2 files
81 new changesets 40d85e9847f2:7069c422939c
81 new changesets 40d85e9847f2:7069c422939c
82 updating to branch default
82 updating to branch default
83 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
83 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
84
84
85
85
86 Repos r1 and r2 should now contain hardlinked files:
86 Repos r1 and r2 should now contain hardlinked files:
87
87
88 $ nlinksdir r1/.hg/store
88 $ nlinksdir r1/.hg/store
89 2 r1/.hg/store/00changelog.i
89 2 r1/.hg/store/00changelog.i
90 2 r1/.hg/store/00manifest.i
90 2 r1/.hg/store/00manifest.i
91 2 r1/.hg/store/data/d1/f2.i
91 2 r1/.hg/store/data/d1/f2.i
92 2 r1/.hg/store/data/f1.i
92 2 r1/.hg/store/data/f1.i
93 2 r1/.hg/store/fncache (repofncache !)
93 2 r1/.hg/store/fncache (repofncache !)
94 1 r1/.hg/store/phaseroots
94 1 r1/.hg/store/phaseroots
95 1 r1/.hg/store/undo
95 1 r1/.hg/store/undo
96 1 r1/.hg/store/undo.backup.fncache (repofncache !)
96 1 r1/.hg/store/undo.backup.fncache (repofncache !)
97 1 r1/.hg/store/undo.backupfiles
97 1 r1/.hg/store/undo.backupfiles
98 1 r1/.hg/store/undo.phaseroots
98 1 r1/.hg/store/undo.phaseroots
99
99
100 $ nlinksdir r2/.hg/store
100 $ nlinksdir r2/.hg/store
101 2 r2/.hg/store/00changelog.i
101 2 r2/.hg/store/00changelog.i
102 2 r2/.hg/store/00manifest.i
102 2 r2/.hg/store/00manifest.i
103 2 r2/.hg/store/data/d1/f2.i
103 2 r2/.hg/store/data/d1/f2.i
104 2 r2/.hg/store/data/f1.i
104 2 r2/.hg/store/data/f1.i
105 2 r2/.hg/store/fncache (repofncache !)
105 2 r2/.hg/store/fncache (repofncache !)
106
106
107 Repo r3 should not be hardlinked:
107 Repo r3 should not be hardlinked:
108
108
109 $ nlinksdir r3/.hg/store
109 $ nlinksdir r3/.hg/store
110 1 r3/.hg/store/00changelog.i
110 1 r3/.hg/store/00changelog.i
111 1 r3/.hg/store/00manifest.i
111 1 r3/.hg/store/00manifest.i
112 1 r3/.hg/store/data/d1/f2.i
112 1 r3/.hg/store/data/d1/f2.i
113 1 r3/.hg/store/data/f1.i
113 1 r3/.hg/store/data/f1.i
114 1 r3/.hg/store/fncache (repofncache !)
114 1 r3/.hg/store/fncache (repofncache !)
115 1 r3/.hg/store/phaseroots
115 1 r3/.hg/store/phaseroots
116 1 r3/.hg/store/undo
116 1 r3/.hg/store/undo
117 1 r3/.hg/store/undo.backupfiles
117 1 r3/.hg/store/undo.backupfiles
118 1 r3/.hg/store/undo.phaseroots
118 1 r3/.hg/store/undo.phaseroots
119
119
120
120
121 Create a non-inlined filelog in r3:
121 Create a non-inlined filelog in r3:
122
122
123 $ cd r3/d1
123 $ cd r3/d1
124 >>> f = open('data1', 'wb')
124 >>> f = open('data1', 'wb')
125 >>> for x in range(10000):
125 >>> for x in range(10000):
126 ... f.write(b"%d\n" % x) and None
126 ... f.write(b"%d\n" % x) and None
127 >>> f.close()
127 >>> f.close()
128 $ for j in 0 1 2 3 4 5 6 7 8 9; do
128 $ for j in 0 1 2 3 4 5 6 7 8 9; do
129 > cat data1 >> f2
129 > cat data1 >> f2
130 > hg commit -m$j
130 > hg commit -m$j
131 > done
131 > done
132 $ cd ../..
132 $ cd ../..
133
133
134 $ nlinksdir r3/.hg/store
134 $ nlinksdir r3/.hg/store
135 1 r3/.hg/store/00changelog.i
135 1 r3/.hg/store/00changelog.i
136 1 r3/.hg/store/00manifest.i
136 1 r3/.hg/store/00manifest.i
137 1 r3/.hg/store/data/d1/f2.d
137 1 r3/.hg/store/data/d1/f2.d
138 1 r3/.hg/store/data/d1/f2.i
138 1 r3/.hg/store/data/d1/f2.i
139 1 r3/.hg/store/data/f1.i
139 1 r3/.hg/store/data/f1.i
140 1 r3/.hg/store/fncache (repofncache !)
140 1 r3/.hg/store/fncache (repofncache !)
141 1 r3/.hg/store/phaseroots
141 1 r3/.hg/store/phaseroots
142 1 r3/.hg/store/undo
142 1 r3/.hg/store/undo
143 1 r3/.hg/store/undo.backup.fncache (repofncache !)
143 1 r3/.hg/store/undo.backup.fncache (repofncache !)
144 1 r3/.hg/store/undo.backup.phaseroots
144 1 r3/.hg/store/undo.backup.phaseroots
145 1 r3/.hg/store/undo.backupfiles
145 1 r3/.hg/store/undo.backupfiles
146 1 r3/.hg/store/undo.phaseroots
146 1 r3/.hg/store/undo.phaseroots
147
147
148 Push to repo r1 should break up most hardlinks in r2:
148 Push to repo r1 should break up most hardlinks in r2:
149
149
150 $ hg -R r2 verify
150 $ hg -R r2 verify
151 checking changesets
151 checking changesets
152 checking manifests
152 checking manifests
153 crosschecking files in changesets and manifests
153 crosschecking files in changesets and manifests
154 checking files
154 checking files
155 checked 2 changesets with 2 changes to 2 files
155 checked 2 changesets with 2 changes to 2 files
156
156
157 $ cd r3
157 $ cd r3
158 $ hg push
158 $ hg push
159 pushing to $TESTTMP/r1
159 pushing to $TESTTMP/r1
160 searching for changes
160 searching for changes
161 adding changesets
161 adding changesets
162 adding manifests
162 adding manifests
163 adding file changes
163 adding file changes
164 added 10 changesets with 10 changes to 1 files
164 added 10 changesets with 10 changes to 1 files
165
165
166 $ cd ..
166 $ cd ..
167
167
168 $ nlinksdir r2/.hg/store
168 $ nlinksdir r2/.hg/store
169 1 r2/.hg/store/00changelog.i
169 1 r2/.hg/store/00changelog.i
170 1 r2/.hg/store/00manifest.i
170 1 r2/.hg/store/00manifest.i
171 1 r2/.hg/store/data/d1/f2.i
171 1 r2/.hg/store/data/d1/f2.i
172 2 r2/.hg/store/data/f1.i
172 2 r2/.hg/store/data/f1.i
173 [12] r2/\.hg/store/fncache (re) (repofncache !)
173 [12] r2/\.hg/store/fncache (re) (repofncache !)
174
174
175 #if hardlink-whitelisted repofncache
175 #if hardlink-whitelisted repofncache
176 $ nlinksdir r2/.hg/store/fncache
176 $ nlinksdir r2/.hg/store/fncache
177 2 r2/.hg/store/fncache
177 2 r2/.hg/store/fncache
178 #endif
178 #endif
179
179
180 $ hg -R r2 verify
180 $ hg -R r2 verify
181 checking changesets
181 checking changesets
182 checking manifests
182 checking manifests
183 crosschecking files in changesets and manifests
183 crosschecking files in changesets and manifests
184 checking files
184 checking files
185 checked 2 changesets with 2 changes to 2 files
185 checked 2 changesets with 2 changes to 2 files
186
186
187
187
188 $ cd r1
188 $ cd r1
189 $ hg up
189 $ hg up
190 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
190 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
191
191
192 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
192 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
193
193
194 $ echo c1c1 >> f1
194 $ echo c1c1 >> f1
195 $ hg ci -m00
195 $ hg ci -m00
196 $ cd ..
196 $ cd ..
197
197
198 $ nlinksdir r2/.hg/store
198 $ nlinksdir r2/.hg/store
199 1 r2/.hg/store/00changelog.i
199 1 r2/.hg/store/00changelog.i
200 1 r2/.hg/store/00manifest.i
200 1 r2/.hg/store/00manifest.i
201 1 r2/.hg/store/data/d1/f2.i
201 1 r2/.hg/store/data/d1/f2.i
202 1 r2/.hg/store/data/f1.i
202 1 r2/.hg/store/data/f1.i
203 [12] r2/\.hg/store/fncache (re) (repofncache !)
203 [12] r2/\.hg/store/fncache (re) (repofncache !)
204
204
205 #if hardlink-whitelisted repofncache
205 #if hardlink-whitelisted repofncache
206 $ nlinksdir r2/.hg/store/fncache
206 $ nlinksdir r2/.hg/store/fncache
207 2 r2/.hg/store/fncache
207 2 r2/.hg/store/fncache
208 #endif
208 #endif
209
209
210 Create a file which exec permissions we will change
210 Create a file which exec permissions we will change
211 $ cd r3
211 $ cd r3
212 $ echo "echo hello world" > f3
212 $ echo "echo hello world" > f3
213 $ hg add f3
213 $ hg add f3
214 $ hg ci -mf3
214 $ hg ci -mf3
215 $ cd ..
215 $ cd ..
216
216
217 $ cd r3
217 $ cd r3
218 $ hg tip --template '{rev}:{node|short}\n'
218 $ hg tip --template '{rev}:{node|short}\n'
219 12:d3b77733a28a
219 12:d3b77733a28a
220 $ echo bla > f1
220 $ echo bla > f1
221 $ chmod +x f3
221 $ chmod +x f3
222 $ hg ci -m1
222 $ hg ci -m1
223 $ cd ..
223 $ cd ..
224
224
225 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
225 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
226
226
227 $ linkcp r3 r4
227 $ linkcp r3 r4
228
228
229 'checklink' is produced by hardlinking a symlink, which is undefined whether
229 'checklink' is produced by hardlinking a symlink, which is undefined whether
230 the symlink should be followed or not. It does behave differently on Linux and
230 the symlink should be followed or not. It does behave differently on Linux and
231 BSD. Just remove it so the test pass on both platforms.
231 BSD. Just remove it so the test pass on both platforms.
232
232
233 $ rm -f r4/.hg/wcache/checklink
233 $ rm -f r4/.hg/wcache/checklink
234
234
235 r4 has hardlinks in the working dir (not just inside .hg):
235 r4 has hardlinks in the working dir (not just inside .hg):
236
236
237 $ nlinksdir r4
237 $ nlinksdir r4
238 2 r4/.hg/00changelog.i
238 2 r4/.hg/00changelog.i
239 2 r4/.hg/branch
239 2 r4/.hg/branch
240 2 r4/.hg/cache/branch2-base
240 2 r4/.hg/cache/branch2-base
241 2 r4/.hg/cache/branch2-served
241 2 r4/.hg/cache/branch2-served
242 2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
242 2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
243 2 r4/.hg/cache/rbc-names-v1
243 2 r4/.hg/cache/rbc-names-v1
244 2 r4/.hg/cache/rbc-revs-v1
244 2 r4/.hg/cache/rbc-revs-v1
245 2 r4/.hg/dirstate
245 2 r4/.hg/dirstate
246 2 r4/.hg/fsmonitor.state (fsmonitor !)
246 2 r4/.hg/fsmonitor.state (fsmonitor !)
247 2 r4/.hg/hgrc
247 2 r4/.hg/hgrc
248 2 r4/.hg/last-message.txt
248 2 r4/.hg/last-message.txt
249 2 r4/.hg/requires
249 2 r4/.hg/requires
250 2 r4/.hg/store/00changelog.i
250 2 r4/.hg/store/00changelog.i
251 2 r4/.hg/store/00manifest.i
251 2 r4/.hg/store/00manifest.i
252 2 r4/.hg/store/data/d1/f2.d
252 2 r4/.hg/store/data/d1/f2.d
253 2 r4/.hg/store/data/d1/f2.i
253 2 r4/.hg/store/data/d1/f2.i
254 2 r4/.hg/store/data/f1.i
254 2 r4/.hg/store/data/f1.i
255 2 r4/.hg/store/data/f3.i
255 2 r4/.hg/store/data/f3.i
256 2 r4/.hg/store/fncache (repofncache !)
256 2 r4/.hg/store/fncache (repofncache !)
257 2 r4/.hg/store/phaseroots
257 2 r4/.hg/store/phaseroots
258 2 r4/.hg/store/undo
258 2 r4/.hg/store/undo
259 2 r4/.hg/store/undo.backup.fncache (repofncache !)
259 2 r4/.hg/store/undo.backup.fncache (repofncache !)
260 2 r4/.hg/store/undo.backup.phaseroots
260 2 r4/.hg/store/undo.backup.phaseroots
261 2 r4/.hg/store/undo.backupfiles
261 2 r4/.hg/store/undo.backupfiles
262 2 r4/.hg/store/undo.phaseroots
262 2 r4/.hg/store/undo.phaseroots
263 [24] r4/\.hg/undo\.backup\.dirstate (re)
263 [24] r4/\.hg/undo\.backup\.dirstate (re)
264 2 r4/.hg/undo.bookmarks
264 2 r4/.hg/undo.bookmarks
265 2 r4/.hg/undo.branch
265 2 r4/.hg/undo.branch
266 2 r4/.hg/undo.desc
266 2 r4/.hg/undo.desc
267 [24] r4/\.hg/undo\.dirstate (re)
267 [24] r4/\.hg/undo\.dirstate (re)
268 2 r4/.hg/wcache/checkisexec (execbit !)
268 2 r4/.hg/wcache/checkisexec (execbit !)
269 2 r4/.hg/wcache/checklink-target (symlink !)
269 2 r4/.hg/wcache/checklink-target (symlink !)
270 2 r4/.hg/wcache/checknoexec (execbit !)
270 2 r4/.hg/wcache/checknoexec (execbit !)
271 2 r4/d1/data1
271 2 r4/d1/data1
272 2 r4/d1/f2
272 2 r4/d1/f2
273 2 r4/f1
273 2 r4/f1
274 2 r4/f3
274 2 r4/f3
275
275
276 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
276 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
277 #if hardlink-whitelisted
277 #if hardlink-whitelisted
278 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
278 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
279 4 r4/.hg/undo.backup.dirstate
279 4 r4/.hg/undo.backup.dirstate
280 4 r4/.hg/undo.dirstate
280 4 r4/.hg/undo.dirstate
281 #endif
281 #endif
282
282
283
283
284 $ hg -R r4 up 12
284 $ hg -R r4 up 12
285 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
285 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
286 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
286 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
287
287
288 $ nlinksdir r4
288 $ nlinksdir r4
289 2 r4/.hg/00changelog.i
289 2 r4/.hg/00changelog.i
290 1 r4/.hg/branch
290 1 r4/.hg/branch
291 2 r4/.hg/cache/branch2-base
291 2 r4/.hg/cache/branch2-base
292 2 r4/.hg/cache/branch2-served
292 2 r4/.hg/cache/branch2-served
293 2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
293 1 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
294 2 r4/.hg/cache/rbc-names-v1
294 2 r4/.hg/cache/rbc-names-v1
295 2 r4/.hg/cache/rbc-revs-v1
295 2 r4/.hg/cache/rbc-revs-v1
296 1 r4/.hg/dirstate
296 1 r4/.hg/dirstate
297 1 r4/.hg/fsmonitor.state (fsmonitor !)
297 1 r4/.hg/fsmonitor.state (fsmonitor !)
298 2 r4/.hg/hgrc
298 2 r4/.hg/hgrc
299 2 r4/.hg/last-message.txt
299 2 r4/.hg/last-message.txt
300 2 r4/.hg/requires
300 2 r4/.hg/requires
301 2 r4/.hg/store/00changelog.i
301 2 r4/.hg/store/00changelog.i
302 2 r4/.hg/store/00manifest.i
302 2 r4/.hg/store/00manifest.i
303 2 r4/.hg/store/data/d1/f2.d
303 2 r4/.hg/store/data/d1/f2.d
304 2 r4/.hg/store/data/d1/f2.i
304 2 r4/.hg/store/data/d1/f2.i
305 2 r4/.hg/store/data/f1.i
305 2 r4/.hg/store/data/f1.i
306 2 r4/.hg/store/data/f3.i
306 2 r4/.hg/store/data/f3.i
307 2 r4/.hg/store/fncache
307 2 r4/.hg/store/fncache
308 2 r4/.hg/store/phaseroots
308 2 r4/.hg/store/phaseroots
309 2 r4/.hg/store/undo
309 2 r4/.hg/store/undo
310 2 r4/.hg/store/undo.backup.fncache (repofncache !)
310 2 r4/.hg/store/undo.backup.fncache (repofncache !)
311 2 r4/.hg/store/undo.backup.phaseroots
311 2 r4/.hg/store/undo.backup.phaseroots
312 2 r4/.hg/store/undo.backupfiles
312 2 r4/.hg/store/undo.backupfiles
313 2 r4/.hg/store/undo.phaseroots
313 2 r4/.hg/store/undo.phaseroots
314 [24] r4/\.hg/undo\.backup\.dirstate (re)
314 [24] r4/\.hg/undo\.backup\.dirstate (re)
315 2 r4/.hg/undo.bookmarks
315 2 r4/.hg/undo.bookmarks
316 2 r4/.hg/undo.branch
316 2 r4/.hg/undo.branch
317 2 r4/.hg/undo.desc
317 2 r4/.hg/undo.desc
318 [24] r4/\.hg/undo\.dirstate (re)
318 [24] r4/\.hg/undo\.dirstate (re)
319 2 r4/.hg/wcache/checkisexec (execbit !)
319 2 r4/.hg/wcache/checkisexec (execbit !)
320 2 r4/.hg/wcache/checklink-target (symlink !)
320 2 r4/.hg/wcache/checklink-target (symlink !)
321 2 r4/.hg/wcache/checknoexec (execbit !)
321 2 r4/.hg/wcache/checknoexec (execbit !)
322 2 r4/d1/data1
322 2 r4/d1/data1
323 2 r4/d1/f2
323 2 r4/d1/f2
324 1 r4/f1
324 1 r4/f1
325 1 r4/f3 (execbit !)
325 1 r4/f3 (execbit !)
326 2 r4/f3 (no-execbit !)
326 2 r4/f3 (no-execbit !)
327
327
328 #if hardlink-whitelisted
328 #if hardlink-whitelisted
329 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
329 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
330 4 r4/.hg/undo.backup.dirstate
330 4 r4/.hg/undo.backup.dirstate
331 4 r4/.hg/undo.dirstate
331 4 r4/.hg/undo.dirstate
332 #endif
332 #endif
333
333
334 Test hardlinking outside hg:
334 Test hardlinking outside hg:
335
335
336 $ mkdir x
336 $ mkdir x
337 $ echo foo > x/a
337 $ echo foo > x/a
338
338
339 $ linkcp x y
339 $ linkcp x y
340 $ echo bar >> y/a
340 $ echo bar >> y/a
341
341
342 No diff if hardlink:
342 No diff if hardlink:
343
343
344 $ diff x/a y/a
344 $ diff x/a y/a
345
345
346 Test mq hardlinking:
346 Test mq hardlinking:
347
347
348 $ echo "[extensions]" >> $HGRCPATH
348 $ echo "[extensions]" >> $HGRCPATH
349 $ echo "mq=" >> $HGRCPATH
349 $ echo "mq=" >> $HGRCPATH
350
350
351 $ hg init a
351 $ hg init a
352 $ cd a
352 $ cd a
353
353
354 $ hg qimport -n foo - << EOF
354 $ hg qimport -n foo - << EOF
355 > # HG changeset patch
355 > # HG changeset patch
356 > # Date 1 0
356 > # Date 1 0
357 > diff -r 2588a8b53d66 a
357 > diff -r 2588a8b53d66 a
358 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
358 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
359 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
359 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
360 > @@ -0,0 +1,1 @@
360 > @@ -0,0 +1,1 @@
361 > +a
361 > +a
362 > EOF
362 > EOF
363 adding foo to series file
363 adding foo to series file
364
364
365 $ hg qpush
365 $ hg qpush
366 applying foo
366 applying foo
367 now at: foo
367 now at: foo
368
368
369 $ cd ..
369 $ cd ..
370 $ linkcp a b
370 $ linkcp a b
371 $ cd b
371 $ cd b
372
372
373 $ hg qimport -n bar - << EOF
373 $ hg qimport -n bar - << EOF
374 > # HG changeset patch
374 > # HG changeset patch
375 > # Date 2 0
375 > # Date 2 0
376 > diff -r 2588a8b53d66 a
376 > diff -r 2588a8b53d66 a
377 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
377 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
378 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
378 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
379 > @@ -0,0 +1,1 @@
379 > @@ -0,0 +1,1 @@
380 > +b
380 > +b
381 > EOF
381 > EOF
382 adding bar to series file
382 adding bar to series file
383
383
384 $ hg qpush
384 $ hg qpush
385 applying bar
385 applying bar
386 now at: bar
386 now at: bar
387
387
388 $ cat .hg/patches/status
388 $ cat .hg/patches/status
389 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
389 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
390 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
390 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
391
391
392 $ cat .hg/patches/series
392 $ cat .hg/patches/series
393 foo
393 foo
394 bar
394 bar
395
395
396 $ cat ../a/.hg/patches/status
396 $ cat ../a/.hg/patches/status
397 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
397 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
398
398
399 $ cat ../a/.hg/patches/series
399 $ cat ../a/.hg/patches/series
400 foo
400 foo
401
401
402 Test tags hardlinking:
402 Test tags hardlinking:
403
403
404 $ hg qdel -r qbase:qtip
404 $ hg qdel -r qbase:qtip
405 patch foo finalized without changeset message
405 patch foo finalized without changeset message
406 patch bar finalized without changeset message
406 patch bar finalized without changeset message
407
407
408 $ hg tag -l lfoo
408 $ hg tag -l lfoo
409 $ hg tag foo
409 $ hg tag foo
410
410
411 $ cd ..
411 $ cd ..
412 $ linkcp b c
412 $ linkcp b c
413 $ cd c
413 $ cd c
414
414
415 $ hg tag -l -r 0 lbar
415 $ hg tag -l -r 0 lbar
416 $ hg tag -r 0 bar
416 $ hg tag -r 0 bar
417
417
418 $ cat .hgtags
418 $ cat .hgtags
419 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
419 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
420 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
420 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
421
421
422 $ cat .hg/localtags
422 $ cat .hg/localtags
423 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
423 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
424 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
424 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
425
425
426 $ cat ../b/.hgtags
426 $ cat ../b/.hgtags
427 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
427 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
428
428
429 $ cat ../b/.hg/localtags
429 $ cat ../b/.hg/localtags
430 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
430 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
431
431
432 $ cd ..
432 $ cd ..
@@ -1,185 +1,203 b''
1 Source bundle was generated with the following script:
1 Source bundle was generated with the following script:
2
2
3 # hg init
3 # hg init
4 # echo a > a
4 # echo a > a
5 # ln -s a l
5 # ln -s a l
6 # hg ci -Ama -d'0 0'
6 # hg ci -Ama -d'0 0'
7 # mkdir b
7 # mkdir b
8 # echo a > b/a
8 # echo a > b/a
9 # chmod +x b/a
9 # chmod +x b/a
10 # hg ci -Amb -d'1 0'
10 # hg ci -Amb -d'1 0'
11
11
12 $ hg init
12 $ hg init
13 $ hg unbundle "$TESTDIR/bundles/test-manifest.hg"
13 $ hg unbundle "$TESTDIR/bundles/test-manifest.hg"
14 adding changesets
14 adding changesets
15 adding manifests
15 adding manifests
16 adding file changes
16 adding file changes
17 added 2 changesets with 3 changes to 3 files
17 added 2 changesets with 3 changes to 3 files
18 new changesets b73562a03cfe:5bdc995175ba (2 drafts)
18 new changesets b73562a03cfe:5bdc995175ba (2 drafts)
19 (run 'hg update' to get a working copy)
19 (run 'hg update' to get a working copy)
20
20
21 The next call is expected to return nothing:
21 The next call is expected to return nothing:
22
22
23 $ hg manifest
23 $ hg manifest
24
24
25 $ hg co
25 $ hg co
26 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
26 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
27
27
28 $ hg manifest
28 $ hg manifest
29 a
29 a
30 b/a
30 b/a
31 l
31 l
32
32
33 $ hg files -vr .
33 $ hg files -vr .
34 2 a
34 2 a
35 2 x b/a
35 2 x b/a
36 1 l l
36 1 l l
37 $ hg files -r . -X b
37 $ hg files -r . -X b
38 a
38 a
39 l
39 l
40 $ hg files -T '{path} {size} {flags}\n'
40 $ hg files -T '{path} {size} {flags}\n'
41 a 2
41 a 2
42 b/a 2 x
42 b/a 2 x
43 l 1 l
43 l 1 l
44 $ hg files -T '{path} {node|shortest}\n' -r.
44 $ hg files -T '{path} {node|shortest}\n' -r.
45 a 5bdc
45 a 5bdc
46 b/a 5bdc
46 b/a 5bdc
47 l 5bdc
47 l 5bdc
48
48
49 $ hg manifest -v
49 $ hg manifest -v
50 644 a
50 644 a
51 755 * b/a
51 755 * b/a
52 644 @ l
52 644 @ l
53 $ hg manifest -T '{path} {rev}\n'
53 $ hg manifest -T '{path} {rev}\n'
54 a 1
54 a 1
55 b/a 1
55 b/a 1
56 l 1
56 l 1
57
57
58 $ hg manifest --debug
58 $ hg manifest --debug
59 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 644 a
59 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 644 a
60 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 755 * b/a
60 b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3 755 * b/a
61 047b75c6d7a3ef6a2243bd0e99f94f6ea6683597 644 @ l
61 047b75c6d7a3ef6a2243bd0e99f94f6ea6683597 644 @ l
62
62
63 $ hg manifest -r 0
63 $ hg manifest -r 0
64 a
64 a
65 l
65 l
66
66
67 $ hg manifest -r 1
67 $ hg manifest -r 1
68 a
68 a
69 b/a
69 b/a
70 l
70 l
71
71
72 $ hg manifest -r tip
72 $ hg manifest -r tip
73 a
73 a
74 b/a
74 b/a
75 l
75 l
76
76
77 $ hg manifest tip
77 $ hg manifest tip
78 a
78 a
79 b/a
79 b/a
80 l
80 l
81
81
82 $ hg manifest --all
82 $ hg manifest --all
83 a
83 a
84 b/a
84 b/a
85 l
85 l
86
86
87 The next two calls are expected to abort:
87 The next two calls are expected to abort:
88
88
89 $ hg manifest -r 2
89 $ hg manifest -r 2
90 abort: unknown revision '2'!
90 abort: unknown revision '2'!
91 [255]
91 [255]
92
92
93 $ hg manifest -r tip tip
93 $ hg manifest -r tip tip
94 abort: please specify just one revision
94 abort: please specify just one revision
95 [255]
95 [255]
96
96
97 Testing the manifest full text cache utility
97 Testing the manifest full text cache utility
98 --------------------------------------------
98 --------------------------------------------
99
99
100 Reminder of the manifest log content
100 Reminder of the manifest log content
101
101
102 $ hg log --debug | grep 'manifest:'
102 $ hg log --debug | grep 'manifest:'
103 manifest: 1:1e01206b1d2f72bd55f2a33fa8ccad74144825b7
103 manifest: 1:1e01206b1d2f72bd55f2a33fa8ccad74144825b7
104 manifest: 0:fce2a30dedad1eef4da95ca1dc0004157aa527cf
104 manifest: 0:fce2a30dedad1eef4da95ca1dc0004157aa527cf
105
105
106 Showing the content of the caches after the above operations
106 Showing the content of the caches after the above operations
107
107
108 $ hg debugmanifestfulltextcache
108 $ hg debugmanifestfulltextcache
109 cache empty
109 cache contains 1 manifest entries, in order of most to least recent:
110 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
111 total cache data size 157 bytes, on-disk 157 bytes
110
112
111 (Clearing the cache in case of any content)
113 (Clearing the cache in case of any content)
112
114
113 $ hg debugmanifestfulltextcache --clear
115 $ hg debugmanifestfulltextcache --clear
114
116
115 Adding a new persistent entry in the cache
117 Adding a new persistent entry in the cache
116
118
117 $ hg debugmanifestfulltextcache --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
119 $ hg debugmanifestfulltextcache --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
118
120
119 $ hg debugmanifestfulltextcache
121 $ hg debugmanifestfulltextcache
120 cache contains 1 manifest entries, in order of most to least recent:
122 cache contains 1 manifest entries, in order of most to least recent:
121 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
123 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
122 total cache data size 157 bytes, on-disk 157 bytes
124 total cache data size 157 bytes, on-disk 157 bytes
123
125
124 Check we don't duplicate the entry (added from the debug command)
126 Check we don't duplicate the entry (added from the debug command)
125
127
126 $ hg debugmanifestfulltextcache --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
128 $ hg debugmanifestfulltextcache --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
127 $ hg debugmanifestfulltextcache
129 $ hg debugmanifestfulltextcache
128 cache contains 1 manifest entries, in order of most to least recent:
130 cache contains 1 manifest entries, in order of most to least recent:
129 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
131 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
130 total cache data size 157 bytes, on-disk 157 bytes
132 total cache data size 157 bytes, on-disk 157 bytes
131
133
132 Adding a second entry
134 Adding a second entry
133
135
134 $ hg debugmanifestfulltextcache --add fce2a30dedad1eef4da95ca1dc0004157aa527cf
136 $ hg debugmanifestfulltextcache --add fce2a30dedad1eef4da95ca1dc0004157aa527cf
135 $ hg debugmanifestfulltextcache
137 $ hg debugmanifestfulltextcache
136 cache contains 2 manifest entries, in order of most to least recent:
138 cache contains 2 manifest entries, in order of most to least recent:
137 id: fce2a30dedad1eef4da95ca1dc0004157aa527cf, size 87 bytes
139 id: fce2a30dedad1eef4da95ca1dc0004157aa527cf, size 87 bytes
138 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
140 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
139 total cache data size 268 bytes, on-disk 268 bytes
141 total cache data size 268 bytes, on-disk 268 bytes
140
142
141 Accessing the initial entry again refreshes its order
143 Accessing the initial entry again refreshes its order
142
144
143 $ hg debugmanifestfulltextcache --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
145 $ hg debugmanifestfulltextcache --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
144 $ hg debugmanifestfulltextcache
146 $ hg debugmanifestfulltextcache
145 cache contains 2 manifest entries, in order of most to least recent:
147 cache contains 2 manifest entries, in order of most to least recent:
146 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
148 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
147 id: fce2a30dedad1eef4da95ca1dc0004157aa527cf, size 87 bytes
149 id: fce2a30dedad1eef4da95ca1dc0004157aa527cf, size 87 bytes
148 total cache data size 268 bytes, on-disk 268 bytes
150 total cache data size 268 bytes, on-disk 268 bytes
149
151
150 Check cache clearing
152 Check cache clearing
151
153
152 $ hg debugmanifestfulltextcache --clear
154 $ hg debugmanifestfulltextcache --clear
153 $ hg debugmanifestfulltextcache
155 $ hg debugmanifestfulltextcache
154 cache empty
156 cache empty
155
157
156 Check adding multiple entries in one go:
158 Check adding multiple entries in one go:
157
159
158 $ hg debugmanifestfulltextcache --add fce2a30dedad1eef4da95ca1dc0004157aa527cf --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
160 $ hg debugmanifestfulltextcache --add fce2a30dedad1eef4da95ca1dc0004157aa527cf --add 1e01206b1d2f72bd55f2a33fa8ccad74144825b7
159 $ hg debugmanifestfulltextcache
161 $ hg debugmanifestfulltextcache
160 cache contains 2 manifest entries, in order of most to least recent:
162 cache contains 2 manifest entries, in order of most to least recent:
161 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
163 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
162 id: fce2a30dedad1eef4da95ca1dc0004157aa527cf, size 87 bytes
164 id: fce2a30dedad1eef4da95ca1dc0004157aa527cf, size 87 bytes
163 total cache data size 268 bytes, on-disk 268 bytes
165 total cache data size 268 bytes, on-disk 268 bytes
164 $ hg debugmanifestfulltextcache --clear
166 $ hg debugmanifestfulltextcache --clear
165
167
166 Test caching behavior on actual operation
168 Test caching behavior on actual operation
167 -----------------------------------------
169 -----------------------------------------
168
170
169 Make sure we start empty
171 Make sure we start empty
170
172
171 $ hg debugmanifestfulltextcache
173 $ hg debugmanifestfulltextcache
172 cache empty
174 cache empty
173
175
174 Commit should have the new node cached:
176 Commit should have the new node cached:
175
177
176 $ echo a >> b/a
178 $ echo a >> b/a
177 $ hg commit -m 'foo'
179 $ hg commit -m 'foo'
178 $ hg debugmanifestfulltextcache
180 $ hg debugmanifestfulltextcache
179 cache contains 2 manifest entries, in order of most to least recent:
181 cache contains 2 manifest entries, in order of most to least recent:
180 id: 26b8653b67af8c1a0a0317c4ee8dac50a41fdb65, size 133 bytes
182 id: 26b8653b67af8c1a0a0317c4ee8dac50a41fdb65, size 133 bytes
181 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
183 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
182 total cache data size 314 bytes, on-disk 314 bytes
184 total cache data size 314 bytes, on-disk 314 bytes
183 $ hg log -r 'ancestors(., 1)' --debug | grep 'manifest:'
185 $ hg log -r 'ancestors(., 1)' --debug | grep 'manifest:'
184 manifest: 1:1e01206b1d2f72bd55f2a33fa8ccad74144825b7
186 manifest: 1:1e01206b1d2f72bd55f2a33fa8ccad74144825b7
185 manifest: 2:26b8653b67af8c1a0a0317c4ee8dac50a41fdb65
187 manifest: 2:26b8653b67af8c1a0a0317c4ee8dac50a41fdb65
188
189 hg update should warm the cache too
190
191 (force dirstate check to avoid flakiness in manifest order)
192 $ hg debugrebuilddirstate
193
194 $ hg update 0
195 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
196 $ hg debugmanifestfulltextcache
197 cache contains 3 manifest entries, in order of most to least recent:
198 id: fce2a30dedad1eef4da95ca1dc0004157aa527cf, size 87 bytes
199 id: 26b8653b67af8c1a0a0317c4ee8dac50a41fdb65, size 133 bytes
200 id: 1e01206b1d2f72bd55f2a33fa8ccad74144825b7, size 133 bytes
201 total cache data size 425 bytes, on-disk 425 bytes
202 $ hg log -r '0' --debug | grep 'manifest:'
203 manifest: 0:fce2a30dedad1eef4da95ca1dc0004157aa527cf
General Comments 0
You need to be logged in to leave comments. Login now