revlog: rename _chunkraw to _getsegmentforrevs()...
Gregory Szorc
r32224:75e93d95 default
@@ -1,1310 +1,1321 @@
# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import functools
import gc
import os
import random
import sys
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    mdiff,
    merge,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, attr, _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    util.timer = time.clock
else:
    util.timer = time.time

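The sentinel trick in safehasattr() is what makes it safer than the builtin: getattr() with a default only treats a genuinely missing attribute as missing, while Python 2's hasattr() swallows every exception the lookup raises. A standalone sketch of the difference, using a made-up Flaky class that is not part of perf.py:

_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, attr, _undefined) is not _undefined

class Flaky(object):
    @property
    def attr(self):
        raise RuntimeError('lookup blew up')

f = Flaky()
print(safehasattr(f, 'missing'))  # False: the attribute is genuinely absent
try:
    safehasattr(f, 'attr')        # a real error propagates instead of being
except RuntimeError as e:         # silently reported as "no such attribute",
    print('surfaced: %s' % e)     # which is what Python 2's hasattr() does
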
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(commands, "formatteropts", [])

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(commands, "debugrevlogopts", [
    ('c', 'changelog', False, ('open changelog')),
    ('m', 'manifest', False, ('open manifest')),
    ('', 'dir', False, ('open directory manifest')),
    ])

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.lstrip("^").split("|")

if safehasattr(cmdutil, 'command'):
    import inspect
    command = cmdutil.command(cmdtable)
    if 'norepo' not in inspect.getargspec(command)[0]:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += ' %s' % ' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += ' %s' % ' '.join(parsealiases(name))
            return func
        return decorator

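For Mercurial versions predating cmdutil.command (1.9), the fallback decorator above fills cmdtable by hand. A runnable sketch of that registration flow, with a hypothetical perfnoop command invented purely for illustration (the decorator body mirrors the fallback above, minus the norepo bookkeeping):

cmdtable = {}

def command(name, options=(), synopsis=None, norepo=False):
    def decorator(func):
        if synopsis:
            cmdtable[name] = func, list(options), synopsis
        else:
            cmdtable[name] = func, list(options)
        return func
    return decorator

@command('perfnoop', [('n', 'count', 10, 'iterations')], 'hg perfnoop')
def perfnoop(ui, repo, **opts):
    """do nothing, measurably"""

print(sorted(cmdtable))         # ['perfnoop']
print(cmdtable['perfnoop'][2])  # 'hg perfnoop'
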
def getlen(ui):
    if ui.configbool("perf", "stub"):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, "perf", "presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter('perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, 'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool("perf", "stub"):
        return functools.partial(stub_timer, fm), fm
    return functools.partial(_timer, fm), fm

def stub_timer(fm, func, title=None):
    func()

def _timer(fm, func, title=None):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        ostart = os.times()
        cstart = util.timer()
        r = func()
        cstop = util.timer()
        ostop = os.times()
        count += 1
        a, b = ostart, ostop
        results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    fm.startitem()

    if title:
        fm.write('title', '! %s\n', title)
    if r:
        fm.write('result', '! result: %s\n', r)
    m = min(results)
    fm.plain('!')
    fm.write('wall', ' wall %f', m[0])
    fm.write('comb', ' comb %f', m[1] + m[2])
    fm.write('user', ' user %f', m[1])
    fm.write('sys', ' sys %f', m[2])
    fm.write('count', ' (best of %d)', count)
    fm.plain('\n')

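_timer() keeps re-running func() until it has enough samples: it stops once more than three seconds have passed with at least 100 runs completed, or after ten seconds with at least three runs, and it reports the fastest run to minimize scheduler noise. A stripped-down, self-contained sketch of just that sampling policy, without the formatter plumbing:

import time

def bestof(func):
    results = []
    begin = time.time()
    count = 0
    while True:
        start = time.time()
        func()
        stop = time.time()
        count += 1
        results.append(stop - start)
        if stop - begin > 3 and count >= 100:
            break
        if stop - begin > 10 and count >= 3:
            break
    return min(results), count

best, runs = bestof(lambda: sum(range(10000)))
print('wall %f (best of %d)' % (best, runs))
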
# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(("%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(("missing attribute %s of %s might break assumption"
                           " of performance measurement") % (name, obj))

    origvalue = getattr(obj, name)
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, name, newvalue)
        def restore(self):
            setattr(obj, name, origvalue)

    return attrutil()

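A usage sketch for safeattrsetter(): temporarily patch an attribute around a measurement, then put the original back. Cfg is a stand-in object, and this simplified variant uses hasattr() in place of the Mercurial error types:

class Cfg(object):
    verbose = True

def safeattrsetter(obj, name):
    if not hasattr(obj, name):
        raise AttributeError('%s has no %r to patch' % (obj, name))
    origvalue = getattr(obj, name)
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, name, newvalue)
        def restore(self):
            setattr(obj, name, origvalue)
    return attrutil()

cfg = Cfg()
patch = safeattrsetter(cfg, 'verbose')
patch.set(False)    # silence output for the duration of a benchmark
print(cfg.verbose)  # False
patch.restore()
print(cfg.verbose)  # True again
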
# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(("perfbranchmap not available with this Mercurial"),
                      hint="use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(("tags API of this hg command is unknown"))

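The del repo.__dict__['_tagscache'] dance above works because a propertycache-style descriptor stores its computed value in the instance dict, so deleting that entry forces recomputation on the next access. A self-contained sketch modeled on Mercurial's util.propertycache; Repo here is a stand-in, not the real class:

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        value = self.func(obj)
        obj.__dict__[self.name] = value  # cache on the instance
        return value

class Repo(object):
    @propertycache
    def _tagscache(self):
        print('recomputing tags cache')
        return {'tip': 'deadbeef'}

repo = Repo()
repo._tagscache                      # computes and caches
repo._tagscache                      # served from the instance dict
if '_tagscache' in vars(repo):
    del repo.__dict__['_tagscache']  # what the clearcache() closure does
repo._tagscache                      # recomputes
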
# perf commands

@command('perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    timer, fm = gettimer(ui, opts)
    try:
        m = scmutil.match(repo[None], pats, {})
        timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
    except Exception:
        try:
            m = scmutil.match(repo[None], pats, {})
            timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
        except Exception:
            timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
    fm.end()

@command('perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    timer, fm = gettimer(ui, opts)
    fc = repo['.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command('perfstatus',
         [('u', 'unknown', False,
           'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
    fm.end()

@command('perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        timer(lambda: scmutil.addremove(repo, matcher, "", dry_run=True))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, 'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, '_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command('perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        clearcaches(cl)
    timer(d)
    fm.end()

@command('perftags', formatteropts)
def perftags(ui, repo, **opts):
    import mercurial.changelog
    import mercurial.manifest
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    repocleartagscache = repocleartagscachefunc(repo)
    def t():
        repo.changelog = mercurial.changelog.changelog(svfs)
        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)
        repocleartagscache()
        return len(repo.tags())
    timer(t)
    fm.end()

@command('perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command('perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command('perfchangegroupchangelog', formatteropts +
         [('', 'version', '02', 'changegroup version'),
          ('r', 'rev', '', 'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    cl = repo.changelog
    revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
    bundler = changegroup.getbundler(version, repo)

    def lookup(node):
        # The real bundler reads the revision in order to access the
        # manifest node and files list. Do that here.
        cl.read(node)
        return node

    def d():
        for chunk in bundler.group(revs, cl, lookup):
            pass

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command('perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    'a' in dirstate
    def d():
        dirstate.dirs()
        del dirstate._dirs
    timer(d)
    fm.end()

@command('perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    "a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        "a" in repo.dirstate
    timer(d)
    fm.end()

@command('perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    "a" in repo.dirstate
    def d():
        "a" in repo.dirstate._dirs
        del repo.dirstate._dirs
    timer(d)
    fm.end()

@command('perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    'a' in dirstate
    def d():
        dirstate._filefoldmap.get('a')
        del dirstate._filefoldmap
    timer(d)
    fm.end()

@command('perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    'a' in dirstate
    def d():
        dirstate._dirfoldmap.get('a')
        del dirstate._dirfoldmap
        del dirstate._dirs
    timer(d)
    fm.end()

@command('perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    "a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

@command('perfmergecalculate',
         [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command('perfpathcopies', [], "REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command('perfmanifest', [], 'REV')
def perfmanifest(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    ctx = scmutil.revsingle(repo, rev, rev)
    t = ctx.manifestnode()
    def d():
        repo.manifestlog.clearcaches()
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

@command('perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    n = repo[rev].node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command('perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    import mercurial.revlog
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo["tip"].node()
    svfs = getsvfs(repo)
    def d():
        cl = mercurial.revlog.revlog(svfs, "00changelog.i")
        cl.rev(n)
    timer(d)
    fm.end()

@command('perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    cmd = sys.argv[0]
    def d():
        if os.name != 'nt':
            os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
        else:
            os.environ['HGRCPATH'] = ''
            os.system("%s version -q > NUL" % cmd)
    timer(d)
    fm.end()

@command('perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, "perf", "parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort("repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

@command('perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command('perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command('perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command('perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command('perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo[rev].node()
    cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command('perflog',
         [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
                               copies=opts.get('rename')))
    ui.popbuffer()
    fm.end()

@command('perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in xrange(len(repo), -1, -1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command('perftemplating', formatteropts)
def perftemplating(ui, repo, rev=None, **opts):
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
                               template='{date|shortdate} [{rev}:{node|short}]'
                               ' {author|person}: {desc|firstline}\n'))
    ui.popbuffer()
    fm.end()

@command('perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command('perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command('perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    lock = repo.lock()
    tr = repo.transaction('perffncachewrite')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command('perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

@command('perfbdiff', revlogopts + formatteropts + [
    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
    ('', 'alldata', False, 'test bdiffs for all associated revisions')],
    '-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    if opts['alldata']:
        opts['changelog'] = True

    if opts.get('changelog') or opts.get('manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('perfbdiff', 'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts['alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
            for pctx in ctx.parents():
                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for pair in textpairs:
            mdiff.textdiff(*pair)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

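perfbdiff above times mdiff.textdiff() over pre-loaded text pairs. A quick sketch of what a single benchmarked call computes; it needs the mercurial package importable, and the sample texts are invented:

from mercurial import mdiff

old = b'line 1\nline 2\nline 3\n'
new = b'line 1\nline two\nline 3\n'

delta = mdiff.textdiff(old, new)               # binary delta old -> new
print(len(delta))
assert bytes(mdiff.patch(old, delta)) == new   # applying it round-trips
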
820 @command('perfdiffwd', formatteropts)
820 @command('perfdiffwd', formatteropts)
821 def perfdiffwd(ui, repo, **opts):
821 def perfdiffwd(ui, repo, **opts):
822 """Profile diff of working directory changes"""
822 """Profile diff of working directory changes"""
823 timer, fm = gettimer(ui, opts)
823 timer, fm = gettimer(ui, opts)
824 options = {
824 options = {
825 'w': 'ignore_all_space',
825 'w': 'ignore_all_space',
826 'b': 'ignore_space_change',
826 'b': 'ignore_space_change',
827 'B': 'ignore_blank_lines',
827 'B': 'ignore_blank_lines',
828 }
828 }
829
829
830 for diffopt in ('', 'w', 'b', 'B', 'wB'):
830 for diffopt in ('', 'w', 'b', 'B', 'wB'):
831 opts = dict((options[c], '1') for c in diffopt)
831 opts = dict((options[c], '1') for c in diffopt)
832 def d():
832 def d():
833 ui.pushbuffer()
833 ui.pushbuffer()
834 commands.diff(ui, repo, **opts)
834 commands.diff(ui, repo, **opts)
835 ui.popbuffer()
835 ui.popbuffer()
836 title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
836 title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
837 timer(d, title)
837 timer(d, title)
838 fm.end()
838 fm.end()
839
839
840 @command('perfrevlog', revlogopts + formatteropts +
840 @command('perfrevlog', revlogopts + formatteropts +
841 [('d', 'dist', 100, 'distance between the revisions'),
841 [('d', 'dist', 100, 'distance between the revisions'),
842 ('s', 'startrev', 0, 'revision to start reading at'),
842 ('s', 'startrev', 0, 'revision to start reading at'),
843 ('', 'reverse', False, 'read in reverse')],
843 ('', 'reverse', False, 'read in reverse')],
844 '-c|-m|FILE')
844 '-c|-m|FILE')
845 def perfrevlog(ui, repo, file_=None, startrev=0, reverse=False, **opts):
845 def perfrevlog(ui, repo, file_=None, startrev=0, reverse=False, **opts):
846 """Benchmark reading a series of revisions from a revlog.
846 """Benchmark reading a series of revisions from a revlog.
847
847
848 By default, we read every ``-d/--dist`` revision from 0 to tip of
848 By default, we read every ``-d/--dist`` revision from 0 to tip of
849 the specified revlog.
849 the specified revlog.
850
850
851 The start revision can be defined via ``-s/--startrev``.
851 The start revision can be defined via ``-s/--startrev``.
852 """
852 """
853 _len = getlen(ui)
853 _len = getlen(ui)
854
854
855 def d():
855 def d():
856 r = cmdutil.openrevlog(repo, 'perfrevlog', file_, opts)
856 r = cmdutil.openrevlog(repo, 'perfrevlog', file_, opts)
857
857
858 beginrev = startrev
858 beginrev = startrev
859 endrev = _len(r)
859 endrev = _len(r)
860 dist = opts['dist']
860 dist = opts['dist']
861
861
862 if reverse:
862 if reverse:
863 beginrev, endrev = endrev, beginrev
863 beginrev, endrev = endrev, beginrev
864 dist = -1 * dist
864 dist = -1 * dist
865
865
866 for x in xrange(beginrev, endrev, dist):
866 for x in xrange(beginrev, endrev, dist):
867 r.revision(r.node(x))
867 r.revision(r.node(x))
868
868
869 timer, fm = gettimer(ui, opts)
869 timer, fm = gettimer(ui, opts)
870 timer(d)
870 timer(d)
871 fm.end()
871 fm.end()
872
872
@command('perfrevlogchunks', revlogopts + formatteropts +
         [('e', 'engines', '', 'compression engines to use'),
          ('s', 'startrev', 0, 'revision to start at')],
         '-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlog`` and ``perfrevlogrevision``.
    """
    rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort('unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress('dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), 'read'),
        (lambda: doreadcachedfh(), 'read w/ reused fd'),
        (lambda: doreadbatch(), 'read batch'),
        (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
        (lambda: dochunk(), 'chunk'),
        (lambda: dochunkbatch(), 'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

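# A minimal, self-contained sketch of the same "isolate I/O from
# decompression" idea, outside Mercurial. This helper is illustrative and
# not part of perf.py; the file path argument is the caller's assumption:
def _demoreaddecompress(path, number=100):
    import timeit
    import zlib

    with open(path, 'rb') as fh:
        raw = fh.read()
    comp = zlib.compress(raw)
    # Time raw reads and decompression separately, mirroring the 'read'
    # and compression benches above.
    readtime = timeit.timeit(lambda: open(path, 'rb').read(), number=number)
    decomptime = timeit.timeit(lambda: zlib.decompress(comp), number=number)
    return readtime, decomptime
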
@command('perfrevlogrevision', revlogopts + formatteropts +
         [('', 'cache', False, 'use caches instead of clearing')],
         '-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Obtain the raw chunks for that delta chain
    3. Decompress each raw chunk
    4. Apply binary patches to obtain fulltext
    5. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    if opts.get('changelog') or opts.get('manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('perfrevlogrevision', 'invalid arguments')

    r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer
        offset = start(chain[0])

        chunks = []
        ladd = chunks.append

        for rev in chain:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(buffer(data, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        segmentforrevs(chain[0], chain[-1])

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    chain = r._deltachain(rev)[0]
    data = segmentforrevs(chain[0], chain[-1])[1]
    rawchunks = getrawchunks(data, chain)
    bins = r._chunks(chain)
    text = str(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), 'full'),
        (lambda: dodeltachain(rev), 'deltachain'),
        (lambda: doread(chain), 'read'),
        (lambda: dorawchunks(data, chain), 'rawchunks'),
        (lambda: dodecompress(rawchunks), 'decompress'),
        (lambda: dopatch(text, bins), 'patch'),
        (lambda: dohash(text), 'hash'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

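# Example invocation (revision number and timings are repository-dependent):
#
#   $ hg perfrevlogrevision -m 1000
#
# times each phase listed in the docstring ('deltachain', 'read',
# 'rawchunks', 'decompress', 'patch', 'hash') for manifest revision 1000,
# plus the 'full' reconstruction.
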
@command('perfrevset',
         [('C', 'clear', False, 'clear volatile cache between each call.'),
          ('', 'contexts', False, 'obtain changectx for each revision')]
         + formatteropts, "REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of the
    volatile revision set caches on revset execution. The volatile caches
    hold data related to filtered and obsolete revisions."""
    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass
    timer(d)
    fm.end()

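# Example invocation (the revset and its cost are repository-dependent):
#
#   $ hg perfrevset 'draft()' --contexts
#
# evaluates the revset repeatedly, materializing a changectx per revision;
# add -C/--clear to include volatile cache invalidation in each run.
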
@command('perfvolatilesets', formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets are the computed elements related to filtering and
    obsolescence."""
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()

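# Example invocation (set names come from obsolete.cachefuncs and
# repoview.filtertable and vary across Mercurial versions):
#
#   $ hg perfvolatilesets obsolete visible
#
# restricts the benchmark to the named sets; with no arguments, every
# known volatile set and filter is timed.
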
@command('perfbranchmap',
         [('f', 'full', False,
           'Includes build time of subset'),
         ] + formatteropts)
def perfbranchmap(ui, repo, full=False, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with cache reads and
    writes disabled.
    """
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # process filters from the smaller subsets to the bigger ones
    possiblefilters = set(repoview.filtertable)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, 'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    # add unfiltered
    allfilters.append(None)

    branchcacheread = safeattrsetter(branchmap, 'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            timer(getbranchmap(name), title=str(name))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()

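# Example invocation:
#
#   $ hg perfbranchmap --full
#
# times a branchmap rebuild for each repository filter (and unfiltered);
# with -f/--full, the build time of each filter's subset is included.
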
@command('perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()

@command('perflrucachedict', formatteropts +
    [('', 'size', 4, 'size of cache'),
     ('', 'gets', 10000, 'number of key lookups'),
     ('', 'sets', 10000, 'number of key sets'),
     ('', 'mixed', 10000, 'number of mixed mode operations'),
     ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
                 mixedgetfreq=50, **opts):
    def doinit():
        for i in xrange(10000):
            util.lrucachedict(size)

    values = []
    for i in xrange(size):
        values.append(random.randint(0, sys.maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    for i in xrange(sets):
        setseq.append(random.randint(0, sys.maxint))

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op, random.randint(0, size * 2)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    benches = [
        (doinit, 'init'),
        (dogets, 'gets'),
        (dosets, 'sets'),
        (domixed, 'mixed')
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

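# For readers unfamiliar with the structure under test: a minimal LRU
# mapping can be sketched on collections.OrderedDict. This illustrates the
# eviction behavior the benches above exercise; it is not the actual
# util.lrucachedict implementation.
from collections import OrderedDict

class _demolru(object):
    def __init__(self, maxsize):
        self._cache = OrderedDict()
        self._maxsize = maxsize

    def __getitem__(self, key):
        # Re-inserting the entry marks it as most recently used.
        value = self._cache.pop(key)
        self._cache[key] = value
        return value

    def __setitem__(self, key, value):
        if key in self._cache:
            self._cache.pop(key)
        elif len(self._cache) >= self._maxsize:
            # Evict the least recently used entry.
            self._cache.popitem(last=False)
        self._cache[key] = value
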
@command('perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write"""
    timer, fm = gettimer(ui, opts)
    def write():
        for i in range(100000):
            ui.write(('Testing write performance\n'))
    timer(write)
    fm.end()

def uisetup(ui):
    if (util.safehasattr(cmdutil, 'openrevlog') and
        not util.safehasattr(commands, 'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, the '--dir' option for
        # openrevlog() should cause a clean failure, because it has
        # only been available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
                raise error.Abort("This version doesn't support --dir option",
                                  hint="use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)

# debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import difflib
import errno
import operator
import os
import random
import socket
import string
import sys
import tempfile
import time

from .i18n import _
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
)
from . import (
    bundle2,
    changegroup,
    cmdutil,
    color,
    commands,
    context,
    dagparser,
    dagutil,
    encoding,
    error,
    exchange,
    extensions,
    fileset,
    formatter,
    hg,
    localrepo,
    lock as lockmod,
    merge as mergemod,
    obsolete,
    policy,
    pvec,
    pycompat,
    repair,
    revlog,
    revset,
    revsetlang,
    scmutil,
    setdiscovery,
    simplemerge,
    smartset,
    sslutil,
    streamclone,
    templater,
    treediscovery,
    upgrade,
    util,
    vfs as vfsmod,
)

release = lockmod.release

# We reuse the command table from commands because it is easier than
# teaching dispatch about multiple tables.
command = cmdutil.command(commands.table)

@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (r.rev(a), hex(a)))

@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)

@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string-valued elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []

    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("builddag")

        at = -1
        atbranch = 'default'
        nodeids = []
        id = 0
        ui.progress(_('building'), id, unit=_('revisions'), total=total)
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note(('node %s\n' % str(data)))
                id, ps = data

                files = []
                fctxs = {}

                p2 = None
                if mergeable_file:
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, mergedtext)

                if overwritten_file:
                    fn = "of"
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)

                if new_file:
                    fn = "nf%i" % id
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                fctxs[fn] = p2[fn]

                def fctxfn(repo, cx, path):
                    return fctxs.get(path)

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                ui.note(('branch %s\n' % data))
                atbranch = data
            ui.progress(_('building'), id, unit=_('revisions'), total=total)
        tr.close()

        if tags:
            repo.vfs.write("localtags", "".join(tags))
    finally:
        ui.progress(_('building'), None)
        release(tr, lock, wlock)

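# Example (run in a fresh, empty repository; the DAG text below is
# illustrative):
#
#   $ hg init dagdemo && cd dagdemo
#   $ hg debugbuilddag '+3:base *2 /base'
#
# creates three linear revisions, tags the last one "base", forks from
# two revisions back, then merges the fork with the "base" node.
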
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    indent_string = ' ' * indent
    if all:
        ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
                 % indent_string)

        def showchunks(named):
            ui.write("\n%s%s\n" % (indent_string, named))
            chain = None
            for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']
                ui.write("%s%s %s %s %s %s %s\n" %
                         (indent_string, hex(node), hex(p1), hex(p2),
                          hex(cs), hex(deltabase), len(delta)))
                chain = node

        chunkdata = gen.changelogheader()
        showchunks("changelog")
        chunkdata = gen.manifestheader()
        showchunks("manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata['filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        chunkdata = gen.changelogheader()
        chain = None
        for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
            node = chunkdata['node']
            ui.write("%s%s\n" % (indent_string, hex(node)))
            chain = node

def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % repr(gen.params)))
    for part in gen.iterparts():
        ui.write('%s -- %r\n' % (part.type, repr(part.params)))
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            _debugchangegroup(ui, cg, all=all, indent=4, **opts)

@command('debugbundle',
        [('a', 'all', None, _('show all details')),
         ('', 'spec', None, _('print the bundlespec of the bundle'))],
        _('FILE'),
        norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            spec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)

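# Example invocation (the bundle file is produced first; file names are
# illustrative):
#
#   $ hg bundle --all everything.hg
#   $ hg debugbundle --spec everything.hg
#   $ hg debugbundle --all everything.hg
#
# prints the bundlespec, then the per-chunk details of the bundle.
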
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # Use a name that doesn't shadow the 'error' module; the original
        # 'error = _(...)' assignment broke the error.Abort call below.
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)

@command('debugcolor',
        [('', 'style', None, _('show all configured styles'))],
        'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available colors, effects or styles"""
    ui.write(('color mode: %s\n') % ui._colormode)
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)

def _debugdisplaycolor(ui):
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort labels with a '_' after the others to group '_background' entries
    items = sorted(ui._styles.items(),
                   key=lambda i: ('_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(('%s\n') % colorname, label=label)

def _debugdisplaystyle(ui):
    ui.write(_('available style:\n'))
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(label))))
            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
        ui.write('\n')

@command('debugcommands', [], _('[COMMAND]'), norepo=True)
def debugcommands(ui, cmd='', *args):
    """list all available commands and options"""
    for cmd, vals in sorted(commands.table.iteritems()):
        cmd = cmd.split('|')[0].strip('^')
        opts = ', '.join([i[1] for i in vals[1]])
        ui.write('%s: %s\n' % (cmd, opts))

@command('debugcomplete',
    [('o', 'options', None, _('show the command options'))],
    _('[-o] CMD'),
    norepo=True)
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get('options'):
        options = []
        otables = [commands.globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
            otables.append(entry[1])
        for t in otables:
            for o in t:
                if "(DEPRECATED)" in o[3]:
                    continue
                if o[0]:
                    options.append('-%s' % o[0])
                options.append('--%s' % o[1])
        ui.write("%s\n" % "\n".join(options))
        return

    cmdlist, unused_allcmds = cmdutil.findpossible(cmd, commands.table)
    if ui.verbose:
        cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
    ui.write("%s\n" % "\n".join(sorted(cmdlist)))

@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))

@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
        ui.write("\n")

@command('debugdata', commands.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(r.revision(r.lookup(rev), raw=True))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)

@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'),
    norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        d = util.parsedate(date, util.extendeddateformats)
    else:
        d = util.parsedate(date)
    ui.write(("internal: %s %s\n") % d)
    ui.write(("standard: %s\n") % util.datestr(d))
    if range:
        m = util.matchdate(range)
        ui.write(("match: %s\n") % m(d[0]))

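# Example invocation (output timestamps depend on the local timezone):
#
#   $ hg debugdate '2017-04-01 12:00'
#
# prints the parsed (unixtime, offset) pair and the normalized date
# string; add -e to also try the extended date formats.
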
@command('debugdeltachain',
    commands.debugrevlogopts + commands.formatteropts,
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain
    """
561 r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
561 r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
562 index = r.index
562 index = r.index
563 generaldelta = r.version & revlog.REVLOGGENERALDELTA
563 generaldelta = r.version & revlog.REVLOGGENERALDELTA
564
564
565 def revinfo(rev):
565 def revinfo(rev):
566 e = index[rev]
566 e = index[rev]
567 compsize = e[1]
567 compsize = e[1]
568 uncompsize = e[2]
568 uncompsize = e[2]
569 chainsize = 0
569 chainsize = 0
570
570
571 if generaldelta:
571 if generaldelta:
572 if e[3] == e[5]:
572 if e[3] == e[5]:
573 deltatype = 'p1'
573 deltatype = 'p1'
574 elif e[3] == e[6]:
574 elif e[3] == e[6]:
575 deltatype = 'p2'
575 deltatype = 'p2'
576 elif e[3] == rev - 1:
576 elif e[3] == rev - 1:
577 deltatype = 'prev'
577 deltatype = 'prev'
578 elif e[3] == rev:
578 elif e[3] == rev:
579 deltatype = 'base'
579 deltatype = 'base'
580 else:
580 else:
581 deltatype = 'other'
581 deltatype = 'other'
582 else:
582 else:
583 if e[3] == rev:
583 if e[3] == rev:
584 deltatype = 'base'
584 deltatype = 'base'
585 else:
585 else:
586 deltatype = 'prev'
586 deltatype = 'prev'
587
587
588 chain = r._deltachain(rev)[0]
588 chain = r._deltachain(rev)[0]
589 for iterrev in chain:
589 for iterrev in chain:
590 e = index[iterrev]
590 e = index[iterrev]
591 chainsize += e[1]
591 chainsize += e[1]
592
592
593 return compsize, uncompsize, deltatype, chain, chainsize
593 return compsize, uncompsize, deltatype, chain, chainsize
594
594
595 fm = ui.formatter('debugdeltachain', opts)
595 fm = ui.formatter('debugdeltachain', opts)
596
596
597 fm.plain(' rev chain# chainlen prev delta '
597 fm.plain(' rev chain# chainlen prev delta '
598 'size rawsize chainsize ratio lindist extradist '
598 'size rawsize chainsize ratio lindist extradist '
599 'extraratio\n')
599 'extraratio\n')
600
600
601 chainbases = {}
601 chainbases = {}
602 for rev in r:
602 for rev in r:
603 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
603 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
604 chainbase = chain[0]
604 chainbase = chain[0]
605 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
605 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
606 basestart = r.start(chainbase)
606 basestart = r.start(chainbase)
607 revstart = r.start(rev)
607 revstart = r.start(rev)
608 lineardist = revstart + comp - basestart
608 lineardist = revstart + comp - basestart
609 extradist = lineardist - chainsize
609 extradist = lineardist - chainsize
610 try:
610 try:
611 prevrev = chain[-2]
611 prevrev = chain[-2]
612 except IndexError:
612 except IndexError:
613 prevrev = -1
613 prevrev = -1
614
614
615 chainratio = float(chainsize) / float(uncomp)
615 chainratio = float(chainsize) / float(uncomp)
616 extraratio = float(extradist) / float(chainsize)
616 extraratio = float(extradist) / float(chainsize)
617
617
618 fm.startitem()
618 fm.startitem()
619 fm.write('rev chainid chainlen prevrev deltatype compsize '
619 fm.write('rev chainid chainlen prevrev deltatype compsize '
620 'uncompsize chainsize chainratio lindist extradist '
620 'uncompsize chainsize chainratio lindist extradist '
621 'extraratio',
621 'extraratio',
622 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
622 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
623 rev, chainid, len(chain), prevrev, deltatype, comp,
623 rev, chainid, len(chain), prevrev, deltatype, comp,
624 uncomp, chainsize, chainratio, lineardist, extradist,
624 uncomp, chainsize, chainratio, lineardist, extradist,
625 extraratio,
625 extraratio,
626 rev=rev, chainid=chainid, chainlen=len(chain),
626 rev=rev, chainid=chainid, chainlen=len(chain),
627 prevrev=prevrev, deltatype=deltatype, compsize=comp,
627 prevrev=prevrev, deltatype=deltatype, compsize=comp,
628 uncompsize=uncomp, chainsize=chainsize,
628 uncompsize=uncomp, chainsize=chainsize,
629 chainratio=chainratio, lindist=lineardist,
629 chainratio=chainratio, lindist=lineardist,
630 extradist=extradist, extraratio=extraratio)
630 extradist=extradist, extraratio=extraratio)
631
631
632 fm.end()
632 fm.end()
633
633
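# Illustrative usage (repository-dependent values): select a few of the
# documented keywords with a template when inspecting the manifest revlog:
#   $ hg debugdeltachain -m -T '{rev} {chainid} {chainlen} {deltatype}\n'
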
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = opts.get('nodates')
    datesort = opts.get('datesort')

    timestr = ""
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if ent[3] == -1:
            timestr = 'unset               '
        elif nodates:
            timestr = 'set                 '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
        if ent[1] & 0o20000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))

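# Editorial note, inferred from the indexing in debugstate() above: each
# dirstate entry is a (state, mode, size, mtime) tuple, so ent[0] is the
# state character, ent[1] the mode bits, ent[2] the size, and ent[3] the
# mtime (-1 when unset).
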
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ] + commands.remoteopts,
    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
                                      opts.get('branch'))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(localheads, remoteheads, remote=remote):
        if opts.get('old'):
            if localheads:
                raise error.Abort('cannot use localheads with old style '
                                  'discovery')
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
        if lheads <= common:
            ui.write(("local is subset\n"))
        elif rheads <= common:
            ui.write(("remote is subset\n"))

    serverlogs = opts.get('serverlog')
    if serverlogs:
        for filename in serverlogs:
            with open(filename, 'r') as logfile:
                line = logfile.readline()
                while line:
                    parts = line.strip().split(';')
                    op = parts[1]
                    if op == 'cg':
                        pass
                    elif op == 'cgss':
                        doit(parts[2].split(' '), parts[3].split(' '))
                    elif op == 'unb':
                        doit(parts[3].split(' '), parts[2].split(' '))
                    line = logfile.readline()
    else:
        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
                                                 opts.get('remote_head'))
        localrevs = opts.get('local_head')
        doit(localrevs, remoterevs)

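# Illustrative usage (assumed path alias): `hg debugdiscovery default`
# prints "comparing with <url>", then the common heads, and "local is
# subset" or "remote is subset" when one side's heads are all common.
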
@command('debugextensions', commands.formatteropts, [], norepo=True)
def debugextensions(ui, **opts):
    '''show information about active extensions'''
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                     _('  location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_('  bundled: %s\n') % ['no', 'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _('  tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _('  bug reporting: %s\n'), extbuglink or "")

    fm.end()

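# Illustrative verbose output for one extension (all values assumed):
#   someext
#     location: /path/to/someext.py
#     bundled: no
#     tested with: 4.1 4.2
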
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
    _('[-r REV] FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)
    if ui.verbose:
        tree = fileset.parse(expr)
        ui.note(fileset.prettyformat(tree), "\n")

    for f in ctx.getfileset(expr):
        ui.write("%s\n" % f)

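# Illustrative usage (assumed pattern): `hg debugfileset '**.py'` prints the
# matching files one per line; with --verbose the parsed fileset tree is
# shown first.
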
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
    casesensitive = '(unknown)'
    try:
        with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
    except OSError:
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)

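# Illustrative output on a typical case-sensitive Linux filesystem (all
# values assumed and platform-dependent):
#   exec: yes
#   fstype: ext4
#   symlink: yes
#   hardlink: yes
#   case-sensitive: yes
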
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)

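# Illustrative usage (assumed URL and node id): save everything reachable
# from one head into a gzip-compressed bundle:
#   $ hg debuggetbundle http://example.com/repo out.hg \
#       -H 0123456789abcdef0123456789abcdef01234567 -t gzip
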
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, show if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        includepat = getattr(ignore, 'includepat', None)
        if includepat is not None:
            ui.write("%s\n" % includepat)
        else:
            raise error.Abort(_("no ignore patterns found"))
    else:
        for f in files:
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % f)
                else:
                    ui.write(_("%s is ignored because of "
                               "containing folder %s\n")
                             % (f, ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % f)

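# Illustrative session (assumed file and .hgignore contents):
#   $ hg debugignore build/output.o
#   build/output.o is ignored
#   (ignore rule in .hgignore, line 3: 'build/')
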
@command('debugindex', commands.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    generaldelta = r.version & revlog.REVLOGGENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = '  base'

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        ui.write(("   rev    offset  length " + basehdr + " linkrev"
                  " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        ui.write(("   rev flag   offset   length"
                  "     size " + basehdr + "   link     p1     p2"
                  " %s\n") % "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                i, r.start(i), r.length(i), base, r.linkrev(i),
                shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                base, r.linkrev(i), pr[0], pr[1], shortfn(node)))

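# Format 0 rows above carry, in order: rev, offset, length, base/delta,
# linkrev, nodeid, p1, p2; format 1 adds flags and rawsize. This legend is
# editorial, derived from the write calls in debugindex().
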
@command('debugindexdot', commands.debugrevlogopts,
    _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write("}\n")

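# Illustrative pipeline (assumes graphviz's `dot` is installed): render the
# changelog DAG as an image:
#   $ hg debugindexdot -c | dot -Tpng > dag.png
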
@command('debuginstall', [] + commands.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''

    def writetemp(contents):
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, pycompat.sysstr("wb"))
        f.write(contents)
        f.close()
        return name

    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        encoding.fromlocal("test")
    except error.Abort as inst:
        err = inst
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable)
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_('  TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_('  SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    if policy.policy in ('c', 'allow'):
        err = None
        try:
            from . import (
                base85,
                bdiff,
                mpatch,
                osutil,
            )
            dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
        except Exception as inst:
            err = inst
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = inst
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    fm.write('editor', _("checking commit editor... (%s)\n"), editor)
    cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editor)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems

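# Illustrative excerpt of a healthy run (all values assumed):
#   checking encoding (UTF-8)...
#   checking Python executable (/usr/bin/python)
#   ...
#   no problems detected
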
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('known'):
        raise error.Abort("known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))

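# Illustrative usage (assumed full hex ids): querying one known and one
# unknown node prints "10":
#   $ hg debugknown . KNOWNNODEHEX UNKNOWNNODEHEX
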
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)

@command('debuglocks',
    [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
    ('W', 'force-wlock', None,
     _('free the working state lock (DANGEROUS)'))],
    _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Returns 0 if no locks are held.

    """

    if opts.get('force_lock'):
        repo.svfs.unlink('lock')
    if opts.get('force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get('force_lock') or opts.get('force_wlock'):
        return 0

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                stat = vfs.lstat(name)
                age = now - stat.st_mtime
                user = util.username(stat.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user, pid)
                    else:
                        locker = 'user %s, process %s, host %s' \
                                 % (user, pid, host)
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held

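# Illustrative output (all values assumed): a held store lock and a free
# working-directory lock:
#   lock:  user alice, process 4711, host build1 (23s)
#   wlock: free
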
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        ui.write(('* version %s records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write(('  local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write(('  ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write(('  other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write(('  local: %s\n' % labels[0]))
                ui.write(('  other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write(('  base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)

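# Quick legend for the record types handled above (editorial, from the
# branches of printrecords()): 'L' local node, 'O' other node, 'm' merge
# driver state, 'F'/'D'/'C' per-file merge records, 'f' per-file extras,
# 'l' merge labels; anything else is dumped as "unrecognized entry".
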
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in repo.names.iteritems():
        if name != 'branches':
            names.update(ns.listnames(repo))
    names.update(tag for (tag, heads, tip, closed)
                 in repo.branchmap().iterbranches() if not closed)
    completions = set()
    if not args:
        args = ['']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')

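# Illustrative usage (assumed names): `hg debugnamecomplete de` prints each
# tag, bookmark, or open branch name starting with "de", one per line.
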
@command('debugobsolete',
    [('', 'flags', 0, _('markers flag')),
    ('', 'record-parents', False,
     _('record parent information for the precursor')),
    ('r', 'rev', [], _('display markers relevant to REV')),
    ('', 'index', False, _('display index of the marker')),
    ('', 'delete', [], _('delete markers specified by indices')),
    ] + commands.commitopts2 + commands.formatteropts,
    _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    if opts.get('delete'):
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = opts['user'] or ui.username()
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = util.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot use --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') % exc)
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsolete.getmarkers(repo, nodes=nodes))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsolete.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            markerstoiter = obsolete.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()

1440 @command('debugpathcomplete',
1440 @command('debugpathcomplete',
1441 [('f', 'full', None, _('complete an entire path')),
1441 [('f', 'full', None, _('complete an entire path')),
1442 ('n', 'normal', None, _('show only normal files')),
1442 ('n', 'normal', None, _('show only normal files')),
1443 ('a', 'added', None, _('show only added files')),
1443 ('a', 'added', None, _('show only added files')),
1444 ('r', 'removed', None, _('show only removed files'))],
1444 ('r', 'removed', None, _('show only removed files'))],
1445 _('FILESPEC...'))
1445 _('FILESPEC...'))
1446 def debugpathcomplete(ui, repo, *specs, **opts):
1446 def debugpathcomplete(ui, repo, *specs, **opts):
1447 '''complete part or all of a tracked path
1447 '''complete part or all of a tracked path
1448
1448
1449 This command supports shells that offer path name completion. It
1449 This command supports shells that offer path name completion. It
1450 currently completes only files already known to the dirstate.
1450 currently completes only files already known to the dirstate.
1451
1451
1452 Completion extends only to the next path segment unless
1452 Completion extends only to the next path segment unless
1453 --full is specified, in which case entire paths are used.'''
1453 --full is specified, in which case entire paths are used.'''
1454
1454
1455 def complete(path, acceptable):
1455 def complete(path, acceptable):
1456 dirstate = repo.dirstate
1456 dirstate = repo.dirstate
1457 spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
1457 spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
1458 rootdir = repo.root + pycompat.ossep
1458 rootdir = repo.root + pycompat.ossep
1459 if spec != repo.root and not spec.startswith(rootdir):
1459 if spec != repo.root and not spec.startswith(rootdir):
1460 return [], []
1460 return [], []
1461 if os.path.isdir(spec):
1461 if os.path.isdir(spec):
1462 spec += '/'
1462 spec += '/'
1463 spec = spec[len(rootdir):]
1463 spec = spec[len(rootdir):]
1464 fixpaths = pycompat.ossep != '/'
1464 fixpaths = pycompat.ossep != '/'
1465 if fixpaths:
1465 if fixpaths:
1466 spec = spec.replace(pycompat.ossep, '/')
1466 spec = spec.replace(pycompat.ossep, '/')
1467 speclen = len(spec)
1467 speclen = len(spec)
1468 fullpaths = opts['full']
1468 fullpaths = opts['full']
1469 files, dirs = set(), set()
1469 files, dirs = set(), set()
1470 adddir, addfile = dirs.add, files.add
1470 adddir, addfile = dirs.add, files.add
1471 for f, st in dirstate.iteritems():
1471 for f, st in dirstate.iteritems():
1472 if f.startswith(spec) and st[0] in acceptable:
1472 if f.startswith(spec) and st[0] in acceptable:
1473 if fixpaths:
1473 if fixpaths:
1474 f = f.replace('/', pycompat.ossep)
1474 f = f.replace('/', pycompat.ossep)
1475 if fullpaths:
1475 if fullpaths:
1476 addfile(f)
1476 addfile(f)
1477 continue
1477 continue
1478 s = f.find(pycompat.ossep, speclen)
1478 s = f.find(pycompat.ossep, speclen)
1479 if s >= 0:
1479 if s >= 0:
1480 adddir(f[:s])
1480 adddir(f[:s])
1481 else:
1481 else:
1482 addfile(f)
1482 addfile(f)
1483 return files, dirs
1483 return files, dirs
1484
1484
1485 acceptable = ''
1485 acceptable = ''
1486 if opts['normal']:
1486 if opts['normal']:
1487 acceptable += 'nm'
1487 acceptable += 'nm'
1488 if opts['added']:
1488 if opts['added']:
1489 acceptable += 'a'
1489 acceptable += 'a'
1490 if opts['removed']:
1490 if opts['removed']:
1491 acceptable += 'r'
1491 acceptable += 'r'
1492 cwd = repo.getcwd()
1492 cwd = repo.getcwd()
1493 if not specs:
1493 if not specs:
1494 specs = ['.']
1494 specs = ['.']
1495
1495
1496 files, dirs = set(), set()
1496 files, dirs = set(), set()
1497 for spec in specs:
1497 for spec in specs:
1498 f, d = complete(spec, acceptable or 'nmar')
1498 f, d = complete(spec, acceptable or 'nmar')
1499 files.update(f)
1499 files.update(f)
1500 dirs.update(d)
1500 dirs.update(d)
1501 files.update(dirs)
1501 files.update(dirs)
1502 ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
1502 ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
1503 ui.write('\n')
1503 ui.write('\n')
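
# A minimal sketch of the "next path segment" rule described in the
# docstring above, assuming plain '/' separators (illustrative only, not
# part of this change):
def nextsegments(tracked, spec, full=False):
    # Keep full paths when 'full' is set; otherwise trim each match to
    # the first separator past the matched prefix.
    out = set()
    for f in tracked:
        if not f.startswith(spec):
            continue
        if full:
            out.add(f)
            continue
        s = f.find('/', len(spec))
        out.add(f if s < 0 else f[:s])
    return sorted(out)

# nextsegments(['src/a.py', 'src/util/b.py'], 'src/')
# == ['src/a.py', 'src/util']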
1504
1504
1505 @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
1505 @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
1506 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
1506 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
1507 '''access the pushkey key/value protocol
1507 '''access the pushkey key/value protocol
1508
1508
1509 With two args, list the keys in the given namespace.
1509 With two args, list the keys in the given namespace.
1510
1510
1511 With five args, set a key to new if it currently is set to old.
1511 With five args, set a key to new if it currently is set to old.
1512 Reports success or failure.
1512 Reports success or failure.
1513 '''
1513 '''
1514
1514
1515 target = hg.peer(ui, {}, repopath)
1515 target = hg.peer(ui, {}, repopath)
1516 if keyinfo:
1516 if keyinfo:
1517 key, old, new = keyinfo
1517 key, old, new = keyinfo
1518 r = target.pushkey(namespace, key, old, new)
1518 r = target.pushkey(namespace, key, old, new)
1519 ui.status(str(r) + '\n')
1519 ui.status(str(r) + '\n')
1520 return not r
1520 return not r
1521 else:
1521 else:
1522 for k, v in sorted(target.listkeys(namespace).iteritems()):
1522 for k, v in sorted(target.listkeys(namespace).iteritems()):
1523 ui.write("%s\t%s\n" % (util.escapestr(k),
1523 ui.write("%s\t%s\n" % (util.escapestr(k),
1524 util.escapestr(v)))
1524 util.escapestr(v)))
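
# A compare-and-swap sketch of the five-argument form described above;
# 'store' stands in for a peer's namespace and is an assumption of this
# example, not part of the wire protocol:
def caspushkey(store, key, old, new):
    # Set 'key' to 'new' only if it currently holds 'old'; report success.
    if store.get(key, '') != old:
        return False
    store[key] = new
    return True

# store = {'bm': 'abc'}; caspushkey(store, 'bm', 'abc', 'def') -> True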
1525
1525
1526 @command('debugpvec', [], _('A B'))
1526 @command('debugpvec', [], _('A B'))
1527 def debugpvec(ui, repo, a, b=None):
1527 def debugpvec(ui, repo, a, b=None):
1528 ca = scmutil.revsingle(repo, a)
1528 ca = scmutil.revsingle(repo, a)
1529 cb = scmutil.revsingle(repo, b)
1529 cb = scmutil.revsingle(repo, b)
1530 pa = pvec.ctxpvec(ca)
1530 pa = pvec.ctxpvec(ca)
1531 pb = pvec.ctxpvec(cb)
1531 pb = pvec.ctxpvec(cb)
1532 if pa == pb:
1532 if pa == pb:
1533 rel = "="
1533 rel = "="
1534 elif pa > pb:
1534 elif pa > pb:
1535 rel = ">"
1535 rel = ">"
1536 elif pa < pb:
1536 elif pa < pb:
1537 rel = "<"
1537 rel = "<"
1538 elif pa | pb:
1538 elif pa | pb:
1539 rel = "|"
1539 rel = "|"
1540 ui.write(_("a: %s\n") % pa)
1540 ui.write(_("a: %s\n") % pa)
1541 ui.write(_("b: %s\n") % pb)
1541 ui.write(_("b: %s\n") % pb)
1542 ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
1542 ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
1543 ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
1543 ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
1544 (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
1544 (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
1545 pa.distance(pb), rel))
1545 pa.distance(pb), rel))
1546
1546
1547 @command('debugrebuilddirstate|debugrebuildstate',
1547 @command('debugrebuilddirstate|debugrebuildstate',
1548 [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
1548 [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
1549 ('', 'minimal', None, _('only rebuild files that are inconsistent with '
1549 ('', 'minimal', None, _('only rebuild files that are inconsistent with '
1550 'the working copy parent')),
1550 'the working copy parent')),
1551 ],
1551 ],
1552 _('[-r REV]'))
1552 _('[-r REV]'))
1553 def debugrebuilddirstate(ui, repo, rev, **opts):
1553 def debugrebuilddirstate(ui, repo, rev, **opts):
1554 """rebuild the dirstate as it would look like for the given revision
1554 """rebuild the dirstate as it would look like for the given revision
1555
1555
1556 If no revision is specified the first current parent will be used.
1556 If no revision is specified the first current parent will be used.
1557
1557
1558 The dirstate will be set to the files of the given revision.
1558 The dirstate will be set to the files of the given revision.
1559 The actual working directory content or existing dirstate
1559 The actual working directory content or existing dirstate
1560 information such as adds or removes is not considered.
1560 information such as adds or removes is not considered.
1561
1561
1562 ``minimal`` will only rebuild the dirstate status for files that claim to be
1562 ``minimal`` will only rebuild the dirstate status for files that claim to be
1563 tracked but are not in the parent manifest, or that exist in the parent
1563 tracked but are not in the parent manifest, or that exist in the parent
1564 manifest but are not in the dirstate. It will not change adds, removes, or
1564 manifest but are not in the dirstate. It will not change adds, removes, or
1565 modified files that are in the working copy parent.
1565 modified files that are in the working copy parent.
1566
1566
1567 One use of this command is to make the next :hg:`status` invocation
1567 One use of this command is to make the next :hg:`status` invocation
1568 check the actual file content.
1568 check the actual file content.
1569 """
1569 """
1570 ctx = scmutil.revsingle(repo, rev)
1570 ctx = scmutil.revsingle(repo, rev)
1571 with repo.wlock():
1571 with repo.wlock():
1572 dirstate = repo.dirstate
1572 dirstate = repo.dirstate
1573 changedfiles = None
1573 changedfiles = None
1574 # See command doc for what minimal does.
1574 # See command doc for what minimal does.
1575 if opts.get('minimal'):
1575 if opts.get('minimal'):
1576 manifestfiles = set(ctx.manifest().keys())
1576 manifestfiles = set(ctx.manifest().keys())
1577 dirstatefiles = set(dirstate)
1577 dirstatefiles = set(dirstate)
1578 manifestonly = manifestfiles - dirstatefiles
1578 manifestonly = manifestfiles - dirstatefiles
1579 dsonly = dirstatefiles - manifestfiles
1579 dsonly = dirstatefiles - manifestfiles
1580 dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
1580 dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
1581 changedfiles = manifestonly | dsnotadded
1581 changedfiles = manifestonly | dsnotadded
1582
1582
1583 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
1583 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
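
# A worked example of the ``minimal`` selection above, using illustrative
# values rather than a real repository:
#   manifestfiles = {'a', 'b', 'c'}      # files in the target manifest
#   dirstatefiles = {'b', 'c', 'd', 'e'} # files the dirstate knows about
#   added         = {'e'}                # dirstate entries in state 'a'
# then manifestonly == {'a'}, dsnotadded == {'d'}, and
# changedfiles == {'a', 'd'} -- only those two entries are rebuilt.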
1584
1584
1585 @command('debugrebuildfncache', [], '')
1585 @command('debugrebuildfncache', [], '')
1586 def debugrebuildfncache(ui, repo):
1586 def debugrebuildfncache(ui, repo):
1587 """rebuild the fncache file"""
1587 """rebuild the fncache file"""
1588 repair.rebuildfncache(ui, repo)
1588 repair.rebuildfncache(ui, repo)
1589
1589
1590 @command('debugrename',
1590 @command('debugrename',
1591 [('r', 'rev', '', _('revision to debug'), _('REV'))],
1591 [('r', 'rev', '', _('revision to debug'), _('REV'))],
1592 _('[-r REV] FILE'))
1592 _('[-r REV] FILE'))
1593 def debugrename(ui, repo, file1, *pats, **opts):
1593 def debugrename(ui, repo, file1, *pats, **opts):
1594 """dump rename information"""
1594 """dump rename information"""
1595
1595
1596 ctx = scmutil.revsingle(repo, opts.get('rev'))
1596 ctx = scmutil.revsingle(repo, opts.get('rev'))
1597 m = scmutil.match(ctx, (file1,) + pats, opts)
1597 m = scmutil.match(ctx, (file1,) + pats, opts)
1598 for abs in ctx.walk(m):
1598 for abs in ctx.walk(m):
1599 fctx = ctx[abs]
1599 fctx = ctx[abs]
1600 o = fctx.filelog().renamed(fctx.filenode())
1600 o = fctx.filelog().renamed(fctx.filenode())
1601 rel = m.rel(abs)
1601 rel = m.rel(abs)
1602 if o:
1602 if o:
1603 ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
1603 ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
1604 else:
1604 else:
1605 ui.write(_("%s not renamed\n") % rel)
1605 ui.write(_("%s not renamed\n") % rel)
1606
1606
1607 @command('debugrevlog', commands.debugrevlogopts +
1607 @command('debugrevlog', commands.debugrevlogopts +
1608 [('d', 'dump', False, _('dump index data'))],
1608 [('d', 'dump', False, _('dump index data'))],
1609 _('-c|-m|FILE'),
1609 _('-c|-m|FILE'),
1610 optionalrepo=True)
1610 optionalrepo=True)
1611 def debugrevlog(ui, repo, file_=None, **opts):
1611 def debugrevlog(ui, repo, file_=None, **opts):
1612 """show data and statistics about a revlog"""
1612 """show data and statistics about a revlog"""
1613 r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
1613 r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
1614
1614
1615 if opts.get("dump"):
1615 if opts.get("dump"):
1616 numrevs = len(r)
1616 numrevs = len(r)
1617 ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
1617 ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
1618 " rawsize totalsize compression heads chainlen\n"))
1618 " rawsize totalsize compression heads chainlen\n"))
1619 ts = 0
1619 ts = 0
1620 heads = set()
1620 heads = set()
1621
1621
1622 for rev in xrange(numrevs):
1622 for rev in xrange(numrevs):
1623 dbase = r.deltaparent(rev)
1623 dbase = r.deltaparent(rev)
1624 if dbase == -1:
1624 if dbase == -1:
1625 dbase = rev
1625 dbase = rev
1626 cbase = r.chainbase(rev)
1626 cbase = r.chainbase(rev)
1627 clen = r.chainlen(rev)
1627 clen = r.chainlen(rev)
1628 p1, p2 = r.parentrevs(rev)
1628 p1, p2 = r.parentrevs(rev)
1629 rs = r.rawsize(rev)
1629 rs = r.rawsize(rev)
1630 ts = ts + rs
1630 ts = ts + rs
1631 heads -= set(r.parentrevs(rev))
1631 heads -= set(r.parentrevs(rev))
1632 heads.add(rev)
1632 heads.add(rev)
1633 try:
1633 try:
1634 compression = ts / r.end(rev)
1634 compression = ts / r.end(rev)
1635 except ZeroDivisionError:
1635 except ZeroDivisionError:
1636 compression = 0
1636 compression = 0
1637 ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
1637 ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
1638 "%11d %5d %8d\n" %
1638 "%11d %5d %8d\n" %
1639 (rev, p1, p2, r.start(rev), r.end(rev),
1639 (rev, p1, p2, r.start(rev), r.end(rev),
1640 r.start(dbase), r.start(cbase),
1640 r.start(dbase), r.start(cbase),
1641 r.start(p1), r.start(p2),
1641 r.start(p1), r.start(p2),
1642 rs, ts, compression, len(heads), clen))
1642 rs, ts, compression, len(heads), clen))
1643 return 0
1643 return 0
1644
1644
1645 v = r.version
1645 v = r.version
1646 format = v & 0xFFFF
1646 format = v & 0xFFFF
1647 flags = []
1647 flags = []
1648 gdelta = False
1648 gdelta = False
1649 if v & revlog.REVLOGNGINLINEDATA:
1649 if v & revlog.REVLOGNGINLINEDATA:
1650 flags.append('inline')
1650 flags.append('inline')
1651 if v & revlog.REVLOGGENERALDELTA:
1651 if v & revlog.REVLOGGENERALDELTA:
1652 gdelta = True
1652 gdelta = True
1653 flags.append('generaldelta')
1653 flags.append('generaldelta')
1654 if not flags:
1654 if not flags:
1655 flags = ['(none)']
1655 flags = ['(none)']
1656
1656
1657 nummerges = 0
1657 nummerges = 0
1658 numfull = 0
1658 numfull = 0
1659 numprev = 0
1659 numprev = 0
1660 nump1 = 0
1660 nump1 = 0
1661 nump2 = 0
1661 nump2 = 0
1662 numother = 0
1662 numother = 0
1663 nump1prev = 0
1663 nump1prev = 0
1664 nump2prev = 0
1664 nump2prev = 0
1665 chainlengths = []
1665 chainlengths = []
1666
1666
1667 datasize = [None, 0, 0]
1667 datasize = [None, 0, 0]
1668 fullsize = [None, 0, 0]
1668 fullsize = [None, 0, 0]
1669 deltasize = [None, 0, 0]
1669 deltasize = [None, 0, 0]
1670 chunktypecounts = {}
1670 chunktypecounts = {}
1671 chunktypesizes = {}
1671 chunktypesizes = {}
1672
1672
1673 def addsize(size, l):
1673 def addsize(size, l):
1674 if l[0] is None or size < l[0]:
1674 if l[0] is None or size < l[0]:
1675 l[0] = size
1675 l[0] = size
1676 if size > l[1]:
1676 if size > l[1]:
1677 l[1] = size
1677 l[1] = size
1678 l[2] += size
1678 l[2] += size
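
    # For example, feeding sizes 5, 12, 3 through addsize leaves
    # l == [3, 12, 20] (min, max, running total).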
1679
1679
1680 numrevs = len(r)
1680 numrevs = len(r)
1681 for rev in xrange(numrevs):
1681 for rev in xrange(numrevs):
1682 p1, p2 = r.parentrevs(rev)
1682 p1, p2 = r.parentrevs(rev)
1683 delta = r.deltaparent(rev)
1683 delta = r.deltaparent(rev)
1684 if format > 0:
1684 if format > 0:
1685 addsize(r.rawsize(rev), datasize)
1685 addsize(r.rawsize(rev), datasize)
1686 if p2 != nullrev:
1686 if p2 != nullrev:
1687 nummerges += 1
1687 nummerges += 1
1688 size = r.length(rev)
1688 size = r.length(rev)
1689 if delta == nullrev:
1689 if delta == nullrev:
1690 chainlengths.append(0)
1690 chainlengths.append(0)
1691 numfull += 1
1691 numfull += 1
1692 addsize(size, fullsize)
1692 addsize(size, fullsize)
1693 else:
1693 else:
1694 chainlengths.append(chainlengths[delta] + 1)
1694 chainlengths.append(chainlengths[delta] + 1)
1695 addsize(size, deltasize)
1695 addsize(size, deltasize)
1696 if delta == rev - 1:
1696 if delta == rev - 1:
1697 numprev += 1
1697 numprev += 1
1698 if delta == p1:
1698 if delta == p1:
1699 nump1prev += 1
1699 nump1prev += 1
1700 elif delta == p2:
1700 elif delta == p2:
1701 nump2prev += 1
1701 nump2prev += 1
1702 elif delta == p1:
1702 elif delta == p1:
1703 nump1 += 1
1703 nump1 += 1
1704 elif delta == p2:
1704 elif delta == p2:
1705 nump2 += 1
1705 nump2 += 1
1706 elif delta != nullrev:
1706 elif delta != nullrev:
1707 numother += 1
1707 numother += 1
1708
1708
1709 # Obtain data on the raw chunks in the revlog.
1709 # Obtain data on the raw chunks in the revlog.
1710 chunk = r._chunkraw(rev, rev)[1]
1710 segment = r._getsegmentforrevs(rev, rev)[1]
1711 if chunk:
1711 if segment:
1712 chunktype = chunk[0]
1712 chunktype = segment[0]
1713 else:
1713 else:
1714 chunktype = 'empty'
1714 chunktype = 'empty'
1715
1715
1716 if chunktype not in chunktypecounts:
1716 if chunktype not in chunktypecounts:
1717 chunktypecounts[chunktype] = 0
1717 chunktypecounts[chunktype] = 0
1718 chunktypesizes[chunktype] = 0
1718 chunktypesizes[chunktype] = 0
1719
1719
1720 chunktypecounts[chunktype] += 1
1720 chunktypecounts[chunktype] += 1
1721 chunktypesizes[chunktype] += size
1721 chunktypesizes[chunktype] += size
1722
1722
1723 # Adjust size min value for empty cases
1723 # Adjust size min value for empty cases
1724 for size in (datasize, fullsize, deltasize):
1724 for size in (datasize, fullsize, deltasize):
1725 if size[0] is None:
1725 if size[0] is None:
1726 size[0] = 0
1726 size[0] = 0
1727
1727
1728 numdeltas = numrevs - numfull
1728 numdeltas = numrevs - numfull
1729 numoprev = numprev - nump1prev - nump2prev
1729 numoprev = numprev - nump1prev - nump2prev
1730 totalrawsize = datasize[2]
1730 totalrawsize = datasize[2]
1731 datasize[2] /= numrevs
1731 datasize[2] /= numrevs
1732 fulltotal = fullsize[2]
1732 fulltotal = fullsize[2]
1733 fullsize[2] /= numfull
1733 fullsize[2] /= numfull
1734 deltatotal = deltasize[2]
1734 deltatotal = deltasize[2]
1735 if numrevs - numfull > 0:
1735 if numrevs - numfull > 0:
1736 deltasize[2] /= numrevs - numfull
1736 deltasize[2] /= numrevs - numfull
1737 totalsize = fulltotal + deltatotal
1737 totalsize = fulltotal + deltatotal
1738 avgchainlen = sum(chainlengths) / numrevs
1738 avgchainlen = sum(chainlengths) / numrevs
1739 maxchainlen = max(chainlengths)
1739 maxchainlen = max(chainlengths)
1740 compratio = 1
1740 compratio = 1
1741 if totalsize:
1741 if totalsize:
1742 compratio = totalrawsize / totalsize
1742 compratio = totalrawsize / totalsize
1743
1743
1744 basedfmtstr = '%%%dd\n'
1744 basedfmtstr = '%%%dd\n'
1745 basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
1745 basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
1746
1746
1747 def dfmtstr(max):
1747 def dfmtstr(max):
1748 return basedfmtstr % len(str(max))
1748 return basedfmtstr % len(str(max))
1749 def pcfmtstr(max, padding=0):
1749 def pcfmtstr(max, padding=0):
1750 return basepcfmtstr % (len(str(max)), ' ' * padding)
1750 return basepcfmtstr % (len(str(max)), ' ' * padding)
1751
1751
1752 def pcfmt(value, total):
1752 def pcfmt(value, total):
1753 if total:
1753 if total:
1754 return (value, 100 * float(value) / total)
1754 return (value, 100 * float(value) / total)
1755 else:
1755 else:
1756 return value, 100.0
1756 return value, 100.0
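
    # e.g. pcfmt(25, 200) == (25, 12.5) -- the value plus its share in %.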
1757
1757
1758 ui.write(('format : %d\n') % format)
1758 ui.write(('format : %d\n') % format)
1759 ui.write(('flags : %s\n') % ', '.join(flags))
1759 ui.write(('flags : %s\n') % ', '.join(flags))
1760
1760
1761 ui.write('\n')
1761 ui.write('\n')
1762 fmt = pcfmtstr(totalsize)
1762 fmt = pcfmtstr(totalsize)
1763 fmt2 = dfmtstr(totalsize)
1763 fmt2 = dfmtstr(totalsize)
1764 ui.write(('revisions : ') + fmt2 % numrevs)
1764 ui.write(('revisions : ') + fmt2 % numrevs)
1765 ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
1765 ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
1766 ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
1766 ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
1767 ui.write(('revisions : ') + fmt2 % numrevs)
1767 ui.write(('revisions : ') + fmt2 % numrevs)
1768 ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
1768 ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
1769 ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
1769 ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
1770 ui.write(('revision size : ') + fmt2 % totalsize)
1770 ui.write(('revision size : ') + fmt2 % totalsize)
1771 ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
1771 ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
1772 ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
1772 ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
1773
1773
1774 def fmtchunktype(chunktype):
1774 def fmtchunktype(chunktype):
1775 if chunktype == 'empty':
1775 if chunktype == 'empty':
1776 return ' %s : ' % chunktype
1776 return ' %s : ' % chunktype
1777 elif chunktype in string.ascii_letters:
1777 elif chunktype in string.ascii_letters:
1778 return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
1778 return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
1779 else:
1779 else:
1780 return ' 0x%s : ' % hex(chunktype)
1780 return ' 0x%s : ' % hex(chunktype)
1781
1781
1782 ui.write('\n')
1782 ui.write('\n')
1783 ui.write(('chunks : ') + fmt2 % numrevs)
1783 ui.write(('chunks : ') + fmt2 % numrevs)
1784 for chunktype in sorted(chunktypecounts):
1784 for chunktype in sorted(chunktypecounts):
1785 ui.write(fmtchunktype(chunktype))
1785 ui.write(fmtchunktype(chunktype))
1786 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
1786 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
1787 ui.write(('chunks size : ') + fmt2 % totalsize)
1787 ui.write(('chunks size : ') + fmt2 % totalsize)
1788 for chunktype in sorted(chunktypecounts):
1788 for chunktype in sorted(chunktypecounts):
1789 ui.write(fmtchunktype(chunktype))
1789 ui.write(fmtchunktype(chunktype))
1790 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
1790 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
1791
1791
1792 ui.write('\n')
1792 ui.write('\n')
1793 fmt = dfmtstr(max(avgchainlen, compratio))
1793 fmt = dfmtstr(max(avgchainlen, compratio))
1794 ui.write(('avg chain length : ') + fmt % avgchainlen)
1794 ui.write(('avg chain length : ') + fmt % avgchainlen)
1795 ui.write(('max chain length : ') + fmt % maxchainlen)
1795 ui.write(('max chain length : ') + fmt % maxchainlen)
1796 ui.write(('compression ratio : ') + fmt % compratio)
1796 ui.write(('compression ratio : ') + fmt % compratio)
1797
1797
1798 if format > 0:
1798 if format > 0:
1799 ui.write('\n')
1799 ui.write('\n')
1800 ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
1800 ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
1801 % tuple(datasize))
1801 % tuple(datasize))
1802 ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
1802 ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
1803 % tuple(fullsize))
1803 % tuple(fullsize))
1804 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
1804 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
1805 % tuple(deltasize))
1805 % tuple(deltasize))
1806
1806
1807 if numdeltas > 0:
1807 if numdeltas > 0:
1808 ui.write('\n')
1808 ui.write('\n')
1809 fmt = pcfmtstr(numdeltas)
1809 fmt = pcfmtstr(numdeltas)
1810 fmt2 = pcfmtstr(numdeltas, 4)
1810 fmt2 = pcfmtstr(numdeltas, 4)
1811 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
1811 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
1812 if numprev > 0:
1812 if numprev > 0:
1813 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
1813 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
1814 numprev))
1814 numprev))
1815 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
1815 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
1816 numprev))
1816 numprev))
1817 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
1817 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
1818 numprev))
1818 numprev))
1819 if gdelta:
1819 if gdelta:
1820 ui.write(('deltas against p1 : ')
1820 ui.write(('deltas against p1 : ')
1821 + fmt % pcfmt(nump1, numdeltas))
1821 + fmt % pcfmt(nump1, numdeltas))
1822 ui.write(('deltas against p2 : ')
1822 ui.write(('deltas against p2 : ')
1823 + fmt % pcfmt(nump2, numdeltas))
1823 + fmt % pcfmt(nump2, numdeltas))
1824 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
1824 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
1825 numdeltas))
1825 numdeltas))
1826
1826
1827 @command('debugrevspec',
1827 @command('debugrevspec',
1828 [('', 'optimize', None,
1828 [('', 'optimize', None,
1829 _('print parsed tree after optimizing (DEPRECATED)')),
1829 _('print parsed tree after optimizing (DEPRECATED)')),
1830 ('p', 'show-stage', [],
1830 ('p', 'show-stage', [],
1831 _('print parsed tree at the given stage'), _('NAME')),
1831 _('print parsed tree at the given stage'), _('NAME')),
1832 ('', 'no-optimized', False, _('evaluate tree without optimization')),
1832 ('', 'no-optimized', False, _('evaluate tree without optimization')),
1833 ('', 'verify-optimized', False, _('verify optimized result')),
1833 ('', 'verify-optimized', False, _('verify optimized result')),
1834 ],
1834 ],
1835 ('REVSPEC'))
1835 ('REVSPEC'))
1836 def debugrevspec(ui, repo, expr, **opts):
1836 def debugrevspec(ui, repo, expr, **opts):
1837 """parse and apply a revision specification
1837 """parse and apply a revision specification
1838
1838
1839 Use -p/--show-stage option to print the parsed tree at the given stages.
1839 Use -p/--show-stage option to print the parsed tree at the given stages.
1840 Use -p all to print tree at every stage.
1840 Use -p all to print tree at every stage.
1841
1841
1842 Use --verify-optimized to compare the optimized result with the unoptimized
1842 Use --verify-optimized to compare the optimized result with the unoptimized
1843 one. Returns 1 if the optimized result differs.
1843 one. Returns 1 if the optimized result differs.
1844 """
1844 """
1845 stages = [
1845 stages = [
1846 ('parsed', lambda tree: tree),
1846 ('parsed', lambda tree: tree),
1847 ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
1847 ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
1848 ('concatenated', revsetlang.foldconcat),
1848 ('concatenated', revsetlang.foldconcat),
1849 ('analyzed', revsetlang.analyze),
1849 ('analyzed', revsetlang.analyze),
1850 ('optimized', revsetlang.optimize),
1850 ('optimized', revsetlang.optimize),
1851 ]
1851 ]
1852 if opts['no_optimized']:
1852 if opts['no_optimized']:
1853 stages = stages[:-1]
1853 stages = stages[:-1]
1854 if opts['verify_optimized'] and opts['no_optimized']:
1854 if opts['verify_optimized'] and opts['no_optimized']:
1855 raise error.Abort(_('cannot use --verify-optimized with '
1855 raise error.Abort(_('cannot use --verify-optimized with '
1856 '--no-optimized'))
1856 '--no-optimized'))
1857 stagenames = set(n for n, f in stages)
1857 stagenames = set(n for n, f in stages)
1858
1858
1859 showalways = set()
1859 showalways = set()
1860 showchanged = set()
1860 showchanged = set()
1861 if ui.verbose and not opts['show_stage']:
1861 if ui.verbose and not opts['show_stage']:
1862 # show parsed tree by --verbose (deprecated)
1862 # show parsed tree by --verbose (deprecated)
1863 showalways.add('parsed')
1863 showalways.add('parsed')
1864 showchanged.update(['expanded', 'concatenated'])
1864 showchanged.update(['expanded', 'concatenated'])
1865 if opts['optimize']:
1865 if opts['optimize']:
1866 showalways.add('optimized')
1866 showalways.add('optimized')
1867 if opts['show_stage'] and opts['optimize']:
1867 if opts['show_stage'] and opts['optimize']:
1868 raise error.Abort(_('cannot use --optimize with --show-stage'))
1868 raise error.Abort(_('cannot use --optimize with --show-stage'))
1869 if opts['show_stage'] == ['all']:
1869 if opts['show_stage'] == ['all']:
1870 showalways.update(stagenames)
1870 showalways.update(stagenames)
1871 else:
1871 else:
1872 for n in opts['show_stage']:
1872 for n in opts['show_stage']:
1873 if n not in stagenames:
1873 if n not in stagenames:
1874 raise error.Abort(_('invalid stage name: %s') % n)
1874 raise error.Abort(_('invalid stage name: %s') % n)
1875 showalways.update(opts['show_stage'])
1875 showalways.update(opts['show_stage'])
1876
1876
1877 treebystage = {}
1877 treebystage = {}
1878 printedtree = None
1878 printedtree = None
1879 tree = revsetlang.parse(expr, lookup=repo.__contains__)
1879 tree = revsetlang.parse(expr, lookup=repo.__contains__)
1880 for n, f in stages:
1880 for n, f in stages:
1881 treebystage[n] = tree = f(tree)
1881 treebystage[n] = tree = f(tree)
1882 if n in showalways or (n in showchanged and tree != printedtree):
1882 if n in showalways or (n in showchanged and tree != printedtree):
1883 if opts['show_stage'] or n != 'parsed':
1883 if opts['show_stage'] or n != 'parsed':
1884 ui.write(("* %s:\n") % n)
1884 ui.write(("* %s:\n") % n)
1885 ui.write(revsetlang.prettyformat(tree), "\n")
1885 ui.write(revsetlang.prettyformat(tree), "\n")
1886 printedtree = tree
1886 printedtree = tree
1887
1887
1888 if opts['verify_optimized']:
1888 if opts['verify_optimized']:
1889 arevs = revset.makematcher(treebystage['analyzed'])(repo)
1889 arevs = revset.makematcher(treebystage['analyzed'])(repo)
1890 brevs = revset.makematcher(treebystage['optimized'])(repo)
1890 brevs = revset.makematcher(treebystage['optimized'])(repo)
1891 if ui.verbose:
1891 if ui.verbose:
1892 ui.note(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
1892 ui.note(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
1893 ui.note(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
1893 ui.note(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
1894 arevs = list(arevs)
1894 arevs = list(arevs)
1895 brevs = list(brevs)
1895 brevs = list(brevs)
1896 if arevs == brevs:
1896 if arevs == brevs:
1897 return 0
1897 return 0
1898 ui.write(('--- analyzed\n'), label='diff.file_a')
1898 ui.write(('--- analyzed\n'), label='diff.file_a')
1899 ui.write(('+++ optimized\n'), label='diff.file_b')
1899 ui.write(('+++ optimized\n'), label='diff.file_b')
1900 sm = difflib.SequenceMatcher(None, arevs, brevs)
1900 sm = difflib.SequenceMatcher(None, arevs, brevs)
1901 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
1901 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
1902 if tag in ('delete', 'replace'):
1902 if tag in ('delete', 'replace'):
1903 for c in arevs[alo:ahi]:
1903 for c in arevs[alo:ahi]:
1904 ui.write('-%s\n' % c, label='diff.deleted')
1904 ui.write('-%s\n' % c, label='diff.deleted')
1905 if tag in ('insert', 'replace'):
1905 if tag in ('insert', 'replace'):
1906 for c in brevs[blo:bhi]:
1906 for c in brevs[blo:bhi]:
1907 ui.write('+%s\n' % c, label='diff.inserted')
1907 ui.write('+%s\n' % c, label='diff.inserted')
1908 if tag == 'equal':
1908 if tag == 'equal':
1909 for c in arevs[alo:ahi]:
1909 for c in arevs[alo:ahi]:
1910 ui.write(' %s\n' % c)
1910 ui.write(' %s\n' % c)
1911 return 1
1911 return 1
1912
1912
1913 func = revset.makematcher(tree)
1913 func = revset.makematcher(tree)
1914 revs = func(repo)
1914 revs = func(repo)
1915 if ui.verbose:
1915 if ui.verbose:
1916 ui.note(("* set:\n"), smartset.prettyformat(revs), "\n")
1916 ui.note(("* set:\n"), smartset.prettyformat(revs), "\n")
1917 for c in revs:
1917 for c in revs:
1918 ui.write("%s\n" % c)
1918 ui.write("%s\n" % c)
1919
1919
1920 @command('debugsetparents', [], _('REV1 [REV2]'))
1920 @command('debugsetparents', [], _('REV1 [REV2]'))
1921 def debugsetparents(ui, repo, rev1, rev2=None):
1921 def debugsetparents(ui, repo, rev1, rev2=None):
1922 """manually set the parents of the current working directory
1922 """manually set the parents of the current working directory
1923
1923
1924 This is useful for writing repository conversion tools, but should
1924 This is useful for writing repository conversion tools, but should
1925 be used with care. For example, neither the working directory nor the
1925 be used with care. For example, neither the working directory nor the
1926 dirstate is updated, so file status may be incorrect after running this
1926 dirstate is updated, so file status may be incorrect after running this
1927 command.
1927 command.
1928
1928
1929 Returns 0 on success.
1929 Returns 0 on success.
1930 """
1930 """
1931
1931
1932 r1 = scmutil.revsingle(repo, rev1).node()
1932 r1 = scmutil.revsingle(repo, rev1).node()
1933 r2 = scmutil.revsingle(repo, rev2, 'null').node()
1933 r2 = scmutil.revsingle(repo, rev2, 'null').node()
1934
1934
1935 with repo.wlock():
1935 with repo.wlock():
1936 repo.setparents(r1, r2)
1936 repo.setparents(r1, r2)
1937
1937
1938 @command('debugsub',
1938 @command('debugsub',
1939 [('r', 'rev', '',
1939 [('r', 'rev', '',
1940 _('revision to check'), _('REV'))],
1940 _('revision to check'), _('REV'))],
1941 _('[-r REV] [REV]'))
1941 _('[-r REV] [REV]'))
1942 def debugsub(ui, repo, rev=None):
1942 def debugsub(ui, repo, rev=None):
1943 ctx = scmutil.revsingle(repo, rev, None)
1943 ctx = scmutil.revsingle(repo, rev, None)
1944 for k, v in sorted(ctx.substate.items()):
1944 for k, v in sorted(ctx.substate.items()):
1945 ui.write(('path %s\n') % k)
1945 ui.write(('path %s\n') % k)
1946 ui.write((' source %s\n') % v[0])
1946 ui.write((' source %s\n') % v[0])
1947 ui.write((' revision %s\n') % v[1])
1947 ui.write((' revision %s\n') % v[1])
1948
1948
1949 @command('debugsuccessorssets',
1949 @command('debugsuccessorssets',
1950 [],
1950 [],
1951 _('[REV]'))
1951 _('[REV]'))
1952 def debugsuccessorssets(ui, repo, *revs):
1952 def debugsuccessorssets(ui, repo, *revs):
1953 """show set of successors for revision
1953 """show set of successors for revision
1954
1954
1955 A successors set of changeset A is a consistent group of revisions that
1955 A successors set of changeset A is a consistent group of revisions that
1956 succeed A. It contains non-obsolete changesets only.
1956 succeed A. It contains non-obsolete changesets only.
1957
1957
1958 In most cases a changeset A has a single successors set containing a single
1958 In most cases a changeset A has a single successors set containing a single
1959 successor (changeset A replaced by A').
1959 successor (changeset A replaced by A').
1960
1960
1961 A changeset that is made obsolete with no successors is called "pruned".
1961 A changeset that is made obsolete with no successors is called "pruned".
1962 Such changesets have no successors sets at all.
1962 Such changesets have no successors sets at all.
1963
1963
1964 A changeset that has been "split" will have a successors set containing
1964 A changeset that has been "split" will have a successors set containing
1965 more than one successor.
1965 more than one successor.
1966
1966
1967 A changeset that has been rewritten in multiple different ways is called
1967 A changeset that has been rewritten in multiple different ways is called
1968 "divergent". Such changesets have multiple successor sets (each of which
1968 "divergent". Such changesets have multiple successor sets (each of which
1969 may also be split, i.e. have multiple successors).
1969 may also be split, i.e. have multiple successors).
1970
1970
1971 Results are displayed as follows::
1971 Results are displayed as follows::
1972
1972
1973 <rev1>
1973 <rev1>
1974 <successors-1A>
1974 <successors-1A>
1975 <rev2>
1975 <rev2>
1976 <successors-2A>
1976 <successors-2A>
1977 <successors-2B1> <successors-2B2> <successors-2B3>
1977 <successors-2B1> <successors-2B2> <successors-2B3>
1978
1978
1979 Here rev2 has two possible (i.e. divergent) successors sets. The first
1979 Here rev2 has two possible (i.e. divergent) successors sets. The first
1980 holds one element, whereas the second holds three (i.e. the changeset has
1980 holds one element, whereas the second holds three (i.e. the changeset has
1981 been split).
1981 been split).
1982 """
1982 """
1983 # passed to successorssets caching computation from one call to another
1983 # passed to successorssets caching computation from one call to another
1984 cache = {}
1984 cache = {}
1985 ctx2str = str
1985 ctx2str = str
1986 node2str = short
1986 node2str = short
1987 if ui.debug():
1987 if ui.debug():
1988 def ctx2str(ctx):
1988 def ctx2str(ctx):
1989 return ctx.hex()
1989 return ctx.hex()
1990 node2str = hex
1990 node2str = hex
1991 for rev in scmutil.revrange(repo, revs):
1991 for rev in scmutil.revrange(repo, revs):
1992 ctx = repo[rev]
1992 ctx = repo[rev]
1993 ui.write('%s\n'% ctx2str(ctx))
1993 ui.write('%s\n'% ctx2str(ctx))
1994 for succsset in obsolete.successorssets(repo, ctx.node(), cache):
1994 for succsset in obsolete.successorssets(repo, ctx.node(), cache):
1995 if succsset:
1995 if succsset:
1996 ui.write(' ')
1996 ui.write(' ')
1997 ui.write(node2str(succsset[0]))
1997 ui.write(node2str(succsset[0]))
1998 for node in succsset[1:]:
1998 for node in succsset[1:]:
1999 ui.write(' ')
1999 ui.write(' ')
2000 ui.write(node2str(node))
2000 ui.write(node2str(node))
2001 ui.write('\n')
2001 ui.write('\n')
2002
2002
2003 @command('debugtemplate',
2003 @command('debugtemplate',
2004 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2004 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2005 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2005 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2006 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2006 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2007 optionalrepo=True)
2007 optionalrepo=True)
2008 def debugtemplate(ui, repo, tmpl, **opts):
2008 def debugtemplate(ui, repo, tmpl, **opts):
2009 """parse and apply a template
2009 """parse and apply a template
2010
2010
2011 If -r/--rev is given, the template is processed as a log template and
2011 If -r/--rev is given, the template is processed as a log template and
2012 applied to the given changesets. Otherwise, it is processed as a generic
2012 applied to the given changesets. Otherwise, it is processed as a generic
2013 template.
2013 template.
2014
2014
2015 Use --verbose to print the parsed tree.
2015 Use --verbose to print the parsed tree.
2016 """
2016 """
2017 revs = None
2017 revs = None
2018 if opts['rev']:
2018 if opts['rev']:
2019 if repo is None:
2019 if repo is None:
2020 raise error.RepoError(_('there is no Mercurial repository here '
2020 raise error.RepoError(_('there is no Mercurial repository here '
2021 '(.hg not found)'))
2021 '(.hg not found)'))
2022 revs = scmutil.revrange(repo, opts['rev'])
2022 revs = scmutil.revrange(repo, opts['rev'])
2023
2023
2024 props = {}
2024 props = {}
2025 for d in opts['define']:
2025 for d in opts['define']:
2026 try:
2026 try:
2027 k, v = (e.strip() for e in d.split('=', 1))
2027 k, v = (e.strip() for e in d.split('=', 1))
2028 if not k or k == 'ui':
2028 if not k or k == 'ui':
2029 raise ValueError
2029 raise ValueError
2030 props[k] = v
2030 props[k] = v
2031 except ValueError:
2031 except ValueError:
2032 raise error.Abort(_('malformed keyword definition: %s') % d)
2032 raise error.Abort(_('malformed keyword definition: %s') % d)
2033
2033
2034 if ui.verbose:
2034 if ui.verbose:
2035 aliases = ui.configitems('templatealias')
2035 aliases = ui.configitems('templatealias')
2036 tree = templater.parse(tmpl)
2036 tree = templater.parse(tmpl)
2037 ui.note(templater.prettyformat(tree), '\n')
2037 ui.note(templater.prettyformat(tree), '\n')
2038 newtree = templater.expandaliases(tree, aliases)
2038 newtree = templater.expandaliases(tree, aliases)
2039 if newtree != tree:
2039 if newtree != tree:
2040 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2040 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2041
2041
2042 mapfile = None
2042 mapfile = None
2043 if revs is None:
2043 if revs is None:
2044 k = 'debugtemplate'
2044 k = 'debugtemplate'
2045 t = formatter.maketemplater(ui, k, tmpl)
2045 t = formatter.maketemplater(ui, k, tmpl)
2046 ui.write(templater.stringify(t(k, ui=ui, **props)))
2046 ui.write(templater.stringify(t(k, ui=ui, **props)))
2047 else:
2047 else:
2048 displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
2048 displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
2049 mapfile, buffered=False)
2049 mapfile, buffered=False)
2050 for r in revs:
2050 for r in revs:
2051 displayer.show(repo[r], **props)
2051 displayer.show(repo[r], **props)
2052 displayer.close()
2052 displayer.close()
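
# A sketch of the -D KEY=VALUE parsing used above (illustrative only; the
# helper name is an assumption of this example):
def parsedefine(d):
    # Split KEY=VALUE once, strip whitespace, and reject empty or
    # reserved keys, mirroring the loop in debugtemplate.
    try:
        k, v = (e.strip() for e in d.split('=', 1))
        if not k or k == 'ui':
            raise ValueError
        return k, v
    except ValueError:
        return None  # caller reports 'malformed keyword definition'

# parsedefine('author=alice') == ('author', 'alice')
# parsedefine('noequals') is None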
2053
2053
2054 @command('debugupgraderepo', [
2054 @command('debugupgraderepo', [
2055 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2055 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2056 ('', 'run', False, _('performs an upgrade')),
2056 ('', 'run', False, _('performs an upgrade')),
2057 ])
2057 ])
2058 def debugupgraderepo(ui, repo, run=False, optimize=None):
2058 def debugupgraderepo(ui, repo, run=False, optimize=None):
2059 """upgrade a repository to use different features
2059 """upgrade a repository to use different features
2060
2060
2061 If no arguments are specified, the repository is evaluated for upgrade
2061 If no arguments are specified, the repository is evaluated for upgrade
2062 and a list of problems and potential optimizations is printed.
2062 and a list of problems and potential optimizations is printed.
2063
2063
2064 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2064 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2065 can be influenced via additional arguments. More details will be provided
2065 can be influenced via additional arguments. More details will be provided
2066 by the command output when run without ``--run``.
2066 by the command output when run without ``--run``.
2067
2067
2068 During the upgrade, the repository will be locked and no writes will be
2068 During the upgrade, the repository will be locked and no writes will be
2069 allowed.
2069 allowed.
2070
2070
2071 At the end of the upgrade, the repository may not be readable while new
2071 At the end of the upgrade, the repository may not be readable while new
2072 repository data is swapped in. This window will be as long as it takes to
2072 repository data is swapped in. This window will be as long as it takes to
2073 rename some directories inside the ``.hg`` directory. On most machines, this
2073 rename some directories inside the ``.hg`` directory. On most machines, this
2074 should complete almost instantaneously and the chances of a consumer being
2074 should complete almost instantaneously and the chances of a consumer being
2075 unable to access the repository should be low.
2075 unable to access the repository should be low.
2076 """
2076 """
2077 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2077 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2078
2078
2079 @command('debugwalk', commands.walkopts, _('[OPTION]... [FILE]...'),
2079 @command('debugwalk', commands.walkopts, _('[OPTION]... [FILE]...'),
2080 inferrepo=True)
2080 inferrepo=True)
2081 def debugwalk(ui, repo, *pats, **opts):
2081 def debugwalk(ui, repo, *pats, **opts):
2082 """show how files match on given patterns"""
2082 """show how files match on given patterns"""
2083 m = scmutil.match(repo[None], pats, opts)
2083 m = scmutil.match(repo[None], pats, opts)
2084 items = list(repo.walk(m))
2084 items = list(repo.walk(m))
2085 if not items:
2085 if not items:
2086 return
2086 return
2087 f = lambda fn: fn
2087 f = lambda fn: fn
2088 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2088 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2089 f = lambda fn: util.normpath(fn)
2089 f = lambda fn: util.normpath(fn)
2090 fmt = 'f %%-%ds %%-%ds %%s' % (
2090 fmt = 'f %%-%ds %%-%ds %%s' % (
2091 max([len(abs) for abs in items]),
2091 max([len(abs) for abs in items]),
2092 max([len(m.rel(abs)) for abs in items]))
2092 max([len(m.rel(abs)) for abs in items]))
2093 for abs in items:
2093 for abs in items:
2094 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2094 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2095 ui.write("%s\n" % line.rstrip())
2095 ui.write("%s\n" % line.rstrip())
2096
2096
2097 @command('debugwireargs',
2097 @command('debugwireargs',
2098 [('', 'three', '', 'three'),
2098 [('', 'three', '', 'three'),
2099 ('', 'four', '', 'four'),
2099 ('', 'four', '', 'four'),
2100 ('', 'five', '', 'five'),
2100 ('', 'five', '', 'five'),
2101 ] + commands.remoteopts,
2101 ] + commands.remoteopts,
2102 _('REPO [OPTIONS]... [ONE [TWO]]'),
2102 _('REPO [OPTIONS]... [ONE [TWO]]'),
2103 norepo=True)
2103 norepo=True)
2104 def debugwireargs(ui, repopath, *vals, **opts):
2104 def debugwireargs(ui, repopath, *vals, **opts):
2105 repo = hg.peer(ui, opts, repopath)
2105 repo = hg.peer(ui, opts, repopath)
2106 for opt in commands.remoteopts:
2106 for opt in commands.remoteopts:
2107 del opts[opt[1]]
2107 del opts[opt[1]]
2108 args = {}
2108 args = {}
2109 for k, v in opts.iteritems():
2109 for k, v in opts.iteritems():
2110 if v:
2110 if v:
2111 args[k] = v
2111 args[k] = v
2112 # run twice to check that we don't mess up the stream for the next command
2112 # run twice to check that we don't mess up the stream for the next command
2113 res1 = repo.debugwireargs(*vals, **args)
2113 res1 = repo.debugwireargs(*vals, **args)
2114 res2 = repo.debugwireargs(*vals, **args)
2114 res2 = repo.debugwireargs(*vals, **args)
2115 ui.write("%s\n" % res1)
2115 ui.write("%s\n" % res1)
2116 if res1 != res2:
2116 if res1 != res2:
2117 ui.warn("%s\n" % res2)
2117 ui.warn("%s\n" % res2)
@@ -1,2138 +1,2138 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import errno
17 import errno
18 import hashlib
18 import hashlib
19 import os
19 import os
20 import struct
20 import struct
21 import zlib
21 import zlib
22
22
23 # import stuff from node for others to import from revlog
23 # import stuff from node for others to import from revlog
24 from .node import (
24 from .node import (
25 bin,
25 bin,
26 hex,
26 hex,
27 nullid,
27 nullid,
28 nullrev,
28 nullrev,
29 )
29 )
30 from .i18n import _
30 from .i18n import _
31 from . import (
31 from . import (
32 ancestor,
32 ancestor,
33 error,
33 error,
34 mdiff,
34 mdiff,
35 parsers,
35 parsers,
36 pycompat,
36 pycompat,
37 templatefilters,
37 templatefilters,
38 util,
38 util,
39 )
39 )
40
40
41 _pack = struct.pack
41 _pack = struct.pack
42 _unpack = struct.unpack
42 _unpack = struct.unpack
43 # Aliased for performance.
43 # Aliased for performance.
44 _zlibdecompress = zlib.decompress
44 _zlibdecompress = zlib.decompress
45
45
46 # revlog header flags
46 # revlog header flags
47 REVLOGV0 = 0
47 REVLOGV0 = 0
48 REVLOGNG = 1
48 REVLOGNG = 1
49 REVLOGNGINLINEDATA = (1 << 16)
49 REVLOGNGINLINEDATA = (1 << 16)
50 REVLOGGENERALDELTA = (1 << 17)
50 REVLOGGENERALDELTA = (1 << 17)
51 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
51 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
52 REVLOG_DEFAULT_FORMAT = REVLOGNG
52 REVLOG_DEFAULT_FORMAT = REVLOGNG
53 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
53 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
54 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
54 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
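
# For instance, the default version word packs the format in the low 16
# bits and the feature flags above them:
#   REVLOG_DEFAULT_VERSION == REVLOGNG | REVLOGNGINLINEDATA == 0x10001
# so (v & 0xFFFF) recovers the format and (v & REVLOGNGINLINEDATA) tests
# for inline data.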
55
55
56 # revlog index flags
56 # revlog index flags
57 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
57 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
58 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
58 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
59 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
59 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
60 REVIDX_DEFAULT_FLAGS = 0
60 REVIDX_DEFAULT_FLAGS = 0
61 # stable order in which flags need to be processed and their processors applied
61 # stable order in which flags need to be processed and their processors applied
62 REVIDX_FLAGS_ORDER = [
62 REVIDX_FLAGS_ORDER = [
63 REVIDX_ISCENSORED,
63 REVIDX_ISCENSORED,
64 REVIDX_ELLIPSIS,
64 REVIDX_ELLIPSIS,
65 REVIDX_EXTSTORED,
65 REVIDX_EXTSTORED,
66 ]
66 ]
67 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
67 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
68
68
69 # max size of revlog with inline data
69 # max size of revlog with inline data
70 _maxinline = 131072
70 _maxinline = 131072
71 _chunksize = 1048576
71 _chunksize = 1048576
72
72
73 RevlogError = error.RevlogError
73 RevlogError = error.RevlogError
74 LookupError = error.LookupError
74 LookupError = error.LookupError
75 CensoredNodeError = error.CensoredNodeError
CensoredNodeError = error.CensoredNodeError
ProgrammingError = error.ProgrammingError

# Store flag processors (cf. 'addflagprocessor()' to register)
_flagprocessors = {
    REVIDX_ISCENSORED: None,
}

def addflagprocessor(flag, processor):
    """Register a flag processor on a revision data flag.

    Invariant:
    - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER.
    - Only one flag processor can be registered on a specific flag.
    - flagprocessors must be 3-tuples of functions (read, write, raw) with the
      following signatures:
          - (read)  f(self, rawtext) -> text, bool
          - (write) f(self, text) -> rawtext, bool
          - (raw)   f(self, rawtext) -> bool
    "text" is presented to the user. "rawtext" is stored in revlog data and is
    not directly visible to the user.
    The boolean returned by these transforms is used to determine whether
    the returned text can be used for hash integrity checking. For example,
    if "write" returns False, then "text" is used to generate the hash. If
    "write" returns True, then the "rawtext" returned by "write" is used to
    generate the hash. Usually, "write" and "read" return different booleans,
    and "raw" returns the same boolean as "write".

    Note: The 'raw' transform is used for changegroup generation and in some
    debug commands. In this case the transform only indicates whether the
    contents can be used for hash integrity checks.
    """
    if not flag & REVIDX_KNOWN_FLAGS:
        msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
        raise ProgrammingError(msg)
    if flag not in REVIDX_FLAGS_ORDER:
        msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
        raise ProgrammingError(msg)
    if flag in _flagprocessors:
        msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
        raise error.Abort(msg)
    _flagprocessors[flag] = processor

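# Illustrative sketch (not part of the original file): how an extension might
# register a flag processor. REVIDX_EXAMPLE and the base64-based transforms
# below are hypothetical, used only to show the required (read, write, raw)
# 3-tuple shape and its boolean contract.
def _exampleflagprocessorusage():
    import base64
    def readtransform(self, rawtext):
        # stored form -> user-visible text; True: result is hash-checkable
        return base64.b64decode(rawtext), True
    def writetransform(self, text):
        # user text -> stored form; False: hash the user text instead
        return base64.b64encode(text), False
    def rawtransform(self, rawtext):
        # same boolean as "write": rawtext is not hash-checkable directly
        return False
    # addflagprocessor(REVIDX_EXAMPLE,
    #                  (readtransform, writetransform, rawtransform))
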
def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    if (type & ~REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError('unknown revlog index flags')
    return int(int(offset) << 16 | type)

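# Illustrative sketch (not part of the original file): the first field of an
# index entry packs a 48-bit offset with 16 bits of flags, so these helpers
# round-trip; flag value 0 keeps the check independent of REVIDX_* values.
def _checkoffsettypepacking():
    packed = offset_type(12345, 0)
    assert getoffset(packed) == 12345
    assert gettype(packed) == 0
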
_nullhash = hashlib.sha1(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent nodes is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = _nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        l = [p1, p2]
        l.sort()
        s = hashlib.sha1(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()

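# Illustrative sketch (not part of the original file): a node id is
# sha1(min(p1, p2) + max(p1, p2) + text); the single-parent fast path above is
# equivalent because nullid (twenty zero bytes) always sorts first.
def _examplenodehash():
    expected = hashlib.sha1(nullid + ('\x11' * 20) + 'data').digest()
    assert hash('data', '\x11' * 20, nullid) == expected
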
# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = ">4l20s20s20s"

class revlogoldio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)

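# Illustrative sketch (not part of the original file): v0 index records are
# fixed-size (4 * 4 + 3 * 20 = 76 bytes), which is why parseindex() above can
# walk the raw data in self.size steps.
def _checkv0recordsize():
    assert struct.calcsize(indexformatv0) == 76
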
# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
versionformat = ">I"

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff

class revlogio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p

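# Illustrative sketch (not part of the original file): RevlogNG records are
# 64 bytes, and packentry() above splices the 4-byte version word over the
# start of the first record, so an index file begins with a ">I"
# version/flags field rather than entry data.
def _checkngrecordsize():
    assert struct.calcsize(indexformatng) == 64
    assert struct.calcsize(versionformat) == 4
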
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.
    """
    def __init__(self, opener, indexfile, checkambig=False):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        # 3-tuple of (node, rev, text) for a raw revision.
        self._cache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._aggressivemergedeltas = False
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        self._compengine = 'zlib'

        v = REVLOG_DEFAULT_VERSION
        opts = getattr(opener, 'options', None)
        if opts is not None:
            if 'revlogv1' in opts:
                if 'generaldelta' in opts:
                    v |= REVLOGGENERALDELTA
            else:
                v = 0
            if 'chunkcachesize' in opts:
                self._chunkcachesize = opts['chunkcachesize']
            if 'maxchainlen' in opts:
                self._maxchainlen = opts['maxchainlen']
            if 'aggressivemergedeltas' in opts:
                self._aggressivemergedeltas = opts['aggressivemergedeltas']
            self._lazydeltabase = bool(opts.get('lazydeltabase', False))
            if 'compengine' in opts:
                self._compengine = opts['compengine']

        if self._chunkcachesize <= 0:
            raise RevlogError(_('revlog chunk cache size %r is not greater '
                                'than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise RevlogError(_('revlog chunk cache size %r is not a power '
                                'of 2') % self._chunkcachesize)

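        # Note (added comment): "x & (x - 1)" clears the lowest set bit, so
        # the expression above is zero exactly when the cache size is a
        # power of two, e.g. 65536 & 65535 == 0 while 65535 & 65534 != 0.
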
        indexdata = ''
        self._initempty = True
        try:
            f = self.opener(self.indexfile)
            indexdata = f.read()
            f.close()
            if len(indexdata) > 0:
                v = struct.unpack(versionformat, indexdata[:4])[0]
                self._initempty = False
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._generaldelta = v & REVLOGGENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self.storedeltachains = True

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def _compressor(self):
        return util.compengines[self._compengine].revlogcompressor()

    def tip(self):
        return self.node(len(self.index) - 2)
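    # Note (added comment): parseindex() appends the magic null revision as
    # the final index entry, so the tip lives at len(self.index) - 2 and
    # __len__ below reports len(self.index) - 1.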
    def __contains__(self, rev):
        return 0 <= rev < len(self)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        return iter(xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        step = 1
        if stop is not None:
            if start > stop:
                step = -1
            stop += step
        else:
            stop = len(self)
        return xrange(start, stop, step)

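    # Note (added comment): revs() walks backwards when start > stop; the
    # "stop += step" adjustment makes the stop revision inclusive in both
    # directions, unlike a bare xrange().
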
    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def clearcaches(self):
        self._cache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, '')
        self._pcache = {}

        try:
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except RevlogError:
            # parsers.c radix tree lookup failed
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(rev, raw=True)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

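    # Note (added comment): REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS masks the
    # ellipsis bit out of the known flags, so the fast path above fires
    # whenever no content-changing flag is set on the revision.
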
    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        return self.index[rev][5:7]

    def node(self, rev):
        return self.index[rev][7]

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

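    # Illustrative sketch (added comment, hypothetical revs): with
    # generaldelta, e[3] names each revision's delta parent. For a chain
    # where rev 5 deltas against 2 and 2 against base 0, _deltachain(5)
    # returns ([0, 2, 5], False), while _deltachain(5, stoprev=2) returns
    # ([5], True).
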
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse topological order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
                                      inclusive=inclusive)

    def descendants(self, revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

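    # Note (added comment): because revision numbers are a topological
    # order, a single forward sweep from min(revs) + 1 suffices; a rev is
    # yielded as soon as either of its parents is already in 'seen'.
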
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev: # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self):
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so no rev is a head at the start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
        return [r for r, val in enumerate(ishead) if val]

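    # Note (added comment): the "+ 1" slot in ishead absorbs writes for
    # nullrev parents (index -1 wraps to the extra last element), and that
    # slot can never be reported as a head because r only ranges over real
    # revisions.
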
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        if start == nullrev:
            return True
        for i in self.descendants([start]):
            if i == end:
                return True
            elif i > end:
                break
        return False

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.commonancestorsheads(a, b)
        except (AttributeError, OverflowError): # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return pycompat.maplist(self.node, ancs)

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        return a in self.commonancestorsheads(a, b)

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                return partial
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

1056 def lookup(self, id):
1056 def lookup(self, id):
1057 """locate a node based on:
1057 """locate a node based on:
1058 - revision number or str(revision number)
1058 - revision number or str(revision number)
1059 - nodeid or subset of hex nodeid
1059 - nodeid or subset of hex nodeid
1060 """
1060 """
1061 n = self._match(id)
1061 n = self._match(id)
1062 if n is not None:
1062 if n is not None:
1063 return n
1063 return n
1064 n = self._partialmatch(id)
1064 n = self._partialmatch(id)
1065 if n:
1065 if n:
1066 return n
1066 return n
1067
1067
1068 raise LookupError(id, self.indexfile, _('no match found'))
1068 raise LookupError(id, self.indexfile, _('no match found'))
1069
1069
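    # Editor's illustrative sketch (hypothetical values): the id forms that
    # lookup() resolves, assuming ``rl`` is a revlog with enough revisions:
    #
    #   rl.lookup(14)           # integer revision number
    #   rl.lookup('14')         # str(rev); negative strings count backwards
    #   rl.lookup(node)         # 20-byte binary node
    #   rl.lookup('a1b2c3')     # unambiguous hex prefix (via _partialmatch)
    #
    # Anything unresolvable raises LookupError with 'no match found'.
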
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.
        """
        if df is not None:
            closehandle = False
        else:
            if self._inline:
                df = self.opener(self.indexfile)
            else:
                df = self.opener(self.datafile)
            closehandle = True

        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        df.seek(realoffset)
        d = df.read(reallength)
        if closehandle:
            df.close()
        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            return util.buffer(d, offset - realoffset, length)
        return d

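    # Editor's worked example (illustrative numbers): with
    # self._chunkcachesize = 65536, a request for offset=70000, length=1000
    # expands to
    #
    #   realoffset = 70000 & ~65535                              # -> 65536
    #   reallength = ((70000 + 1000 + 65536) & ~65535) - 65536   # -> 65536
    #
    # so a single aligned 64 KiB window [65536, 131072) is read and cached,
    # and util.buffer slices the 1000 requested bytes back out of it.
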
    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is used, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

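    # --- Editor's illustrative sketch (not part of the original change) ---
    # A hypothetical helper showing how a caller turns the (offset, data)
    # pair from _getsegmentforrevs() into one revision's raw chunk, using
    # the same start/length/inline arithmetic that _chunks() uses below.
    def _examplechunkforrev(self, rev, df=None):
        offset, data = self._getsegmentforrevs(rev, rev, df=df)
        chunkstart = self.start(rev)
        if self._inline:
            # inline revlogs interleave index entries with the data
            chunkstart += (rev + 1) * self._io.size
        return util.buffer(data, chunkstart - offset, self.length(rev))
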
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        try:
            offset, data = self._getsegmentforrevs(revs[0], revs[-1], df=df)
        except OverflowError:
            # issue4215 - we can't cache a run of chunks greater than
            # 2G on Windows
            return [self._chunk(rev, df=df) for rev in revs]

        decomp = self.decompress
        for rev in revs:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

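    # Editor's note (illustrative): _chunks() pays for a single segment read
    # covering revs[0]..revs[-1] and then slices every revision out of that
    # one buffer, so _chunks([10, 11, 12]) performs one I/O where three
    # separate _chunk() calls could perform three; only decompression is
    # still per-revision.
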
    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

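    # Editor's note (illustrative): the fast path in revdiff() works because
    # when rev1 is already rev2's delta parent, the stored chunk for rev2
    # *is* the wanted delta, so it can be returned directly instead of
    # reconstructing both fulltexts and re-diffing them with mdiff.textdiff.
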
    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        flags = None
        rawtext = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                # _cache only stores rawtext
                if raw:
                    return self._cache[2]
                # duplicated, but good for perf
                if rev is None:
                    rev = self.rev(node)
                if flags is None:
                    flags = self.flags(rev)
                # no extra flags set, no flag processor runs, text = rawtext
                if flags == REVIDX_DEFAULT_FLAGS:
                    return self._cache[2]
                # rawtext is reusable. need to run flag processor
                rawtext = self._cache[2]

            cachedrev = self._cache[1]

        # look up what we need to read
        if rawtext is None:
            if rev is None:
                rev = self.rev(node)

            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
            if stopped:
                rawtext = self._cache[2]

            # drop cache to save memory
            self._cache = None

            bins = self._chunks(chain, df=_df)
            if rawtext is None:
                rawtext = bytes(bins[0])
                bins = bins[1:]

            rawtext = mdiff.patches(rawtext, bins)
            self._cache = (node, rev, rawtext)

        if flags is None:
            if rev is None:
                rev = self.rev(node)
            flags = self.flags(rev)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

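    # Editor's illustrative sketch (hypothetical revs): for a delta chain
    # [2, 5, 9] ending at the requested rev 9, revision() effectively does
    #
    #   bins = self._chunks([2, 5, 9])            # base text plus two deltas
    #   rawtext = mdiff.patches(bytes(bins[0]), bins[1:])
    #
    # i.e. the chain's base fulltext is patched with each delta in order,
    # and only then do flag processors and the hash check run on the result.
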
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return hash(text, p1, p2)

    def _processflags(self, text, flags, operation, raw=False):
        """Inspect revision data flags and apply transforms defined by
        registered flag processors.

        ``text`` - the revision data to process
        ``flags`` - the revision flags
        ``operation`` - the operation being performed (read or write)
        ``raw`` - an optional argument describing if the raw transform should be
        applied.

        This method processes the flags in the order (or reverse order if
        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
        flag processors registered for present flags. The order of flags defined
        in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.

        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
        processed text and ``validatehash`` is a bool indicating whether the
        returned text should be checked for hash integrity.

        Note: If the ``raw`` argument is set, it has precedence over the
        operation and will only update the value of ``validatehash``.
        """
        if operation not in ('read', 'write'):
            raise ProgrammingError(_("invalid '%s' operation ") % (operation))
        # Check all flags are known.
        if flags & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_("incompatible revision flag '%#x'") %
                              (flags & ~REVIDX_KNOWN_FLAGS))
        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in _flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise RevlogError(message)

                processor = _flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                    validatehash = validatehash and vhash

        return text, validatehash

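    # --- Editor's illustrative sketch (assumption, names invented) ---
    # A processor registered in _flagprocessors is a 3-tuple of
    # (readtransform, writetransform, rawtransform). A hypothetical no-op
    # processor for some flag bit could look like:
    #
    #   def _noopread(rl, text):
    #       return text, True          # (new text, validatehash)
    #
    #   def _noopwrite(rl, text):
    #       return text, True
    #
    #   def _noopraw(rl, text):
    #       return True                # raw path only reports validatehash
    #
    #   _flagprocessors[SOME_REVIDX_FLAG] = (_noopread, _noopwrite, _noopraw)
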
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != self.hash(text, p1, p2):
            revornode = rev
            if revornode is None:
                revornode = templatefilters.short(hex(node))
            raise RevlogError(_("integrity check failed on %s:%s")
                              % (self.indexfile, revornode))

    def checkinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(-2)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._getsegmentforrevs(r, r)[1])
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True,
                         checkambig=self._checkambig)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call close, the temp file will never replace the
        # real index
        fp.close()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None, flags=REVIDX_DEFAULT_FLAGS):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
        computed by default as hash(text, p1, p2), however subclasses might
        use a different hashing method (and override checkhash() in such case)
        flags - the known flags to set on the revision
        """
        if link == nullrev:
            raise RevlogError(_("attempted to add linkrev -1 to %s")
                              % self.indexfile)

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(rawtext)))

        node = node or self.hash(rawtext, p1, p2)
        if node in self.nodemap:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a+")
        ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
        try:
            return self._addrevision(node, rawtext, transaction, link, p1, p2,
                                     flags, cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return '', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return '', compressed

        if data[0:1] == '\0':
            return '', data
        return 'u', data

    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == 'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise RevlogError(_('revlog decompress error: %s') % str(e))
        # '\0' is more common than 'u' so it goes first.
        elif t == '\0':
            return data
        elif t == 'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor()
                self._decompressors[t] = compressor
            except KeyError:
                raise RevlogError(_('unknown compression type %r') % t)

        return compressor.decompress(data)

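    # Editor's worked example (illustrative): the first byte routes the
    # chunk. zlib streams start with 'x' (0x78), so
    #
    #   zlib.compress('some text')[0:1] == 'x'   # -> _zlibdecompress path
    #   '\0<raw bytes>'                          # -> returned as-is
    #   'u<raw bytes>'                           # -> marker stripped, rest kept
    #
    # Any other leading byte is treated as a compression-engine revlog
    # header and resolved through util.compengines.forrevlogheader().
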
    def _isgooddelta(self, d, textlen):
        """Returns True if the given delta is good. Good means that it is within
        the disk span, disk size, and chain length bounds that we know to be
        performant."""
        if d is None:
            return False

        # - 'dist' is the distance from the base revision -- bounding it limits
        #   the amount of I/O we need to do.
        # - 'compresseddeltalen' is the sum of the total size of deltas we need
        #   to apply -- bounding it limits the amount of CPU we consume.
        dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
        if (dist > textlen * 4 or l > textlen or
            compresseddeltalen > textlen * 2 or
                (self._maxchainlen and chainlen > self._maxchainlen)):
            return False

        return True

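    # Editor's worked example (illustrative numbers): for a 1000-byte
    # fulltext, _isgooddelta() keeps a delta only if
    #
    #   dist               <= 4000    # span of data to read is at most 4x
    #   l                  <= 1000    # the delta is no larger than the text
    #   compresseddeltalen <= 2000    # whole chain costs at most 2x to apply
    #   chainlen           <= self._maxchainlen (when that limit is set)
    #
    # Failing any bound sends the caller looking for another base, or for
    # storing a fulltext.
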
1606 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1606 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1607 cachedelta, ifh, dfh, alwayscache=False):
1607 cachedelta, ifh, dfh, alwayscache=False):
1608 """internal function to add revisions to the log
1608 """internal function to add revisions to the log
1609
1609
1610 see addrevision for argument descriptions.
1610 see addrevision for argument descriptions.
1611
1611
1612 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1612 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1613
1613
1614 invariants:
1614 invariants:
1615 - rawtext is optional (can be None); if not set, cachedelta must be set.
1615 - rawtext is optional (can be None); if not set, cachedelta must be set.
1616 if both are set, they must correspond to each other.
1616 if both are set, they must correspond to each other.
1617 """
1617 """
1618 btext = [rawtext]
1618 btext = [rawtext]
1619 def buildtext():
1619 def buildtext():
1620 if btext[0] is not None:
1620 if btext[0] is not None:
1621 return btext[0]
1621 return btext[0]
1622 baserev = cachedelta[0]
1622 baserev = cachedelta[0]
1623 delta = cachedelta[1]
1623 delta = cachedelta[1]
1624 # special case deltas which replace entire base; no need to decode
1624 # special case deltas which replace entire base; no need to decode
1625 # base revision. this neatly avoids censored bases, which throw when
1625 # base revision. this neatly avoids censored bases, which throw when
1626 # they're decoded.
1626 # they're decoded.
1627 hlen = struct.calcsize(">lll")
1627 hlen = struct.calcsize(">lll")
1628 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1628 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1629 len(delta) - hlen):
1629 len(delta) - hlen):
1630 btext[0] = delta[hlen:]
1630 btext[0] = delta[hlen:]
1631 else:
1631 else:
1632 if self._inline:
1632 if self._inline:
1633 fh = ifh
1633 fh = ifh
1634 else:
1634 else:
1635 fh = dfh
1635 fh = dfh
1636 basetext = self.revision(baserev, _df=fh, raw=True)
1636 basetext = self.revision(baserev, _df=fh, raw=True)
1637 btext[0] = mdiff.patch(basetext, delta)
1637 btext[0] = mdiff.patch(basetext, delta)
1638
1638
1639 try:
1639 try:
1640 res = self._processflags(btext[0], flags, 'read', raw=True)
1640 res = self._processflags(btext[0], flags, 'read', raw=True)
1641 btext[0], validatehash = res
1641 btext[0], validatehash = res
1642 if validatehash:
1642 if validatehash:
1643 self.checkhash(btext[0], node, p1=p1, p2=p2)
1643 self.checkhash(btext[0], node, p1=p1, p2=p2)
1644 if flags & REVIDX_ISCENSORED:
1644 if flags & REVIDX_ISCENSORED:
1645 raise RevlogError(_('node %s is not censored') % node)
1645 raise RevlogError(_('node %s is not censored') % node)
1646 except CensoredNodeError:
1646 except CensoredNodeError:
1647 # must pass the censored index flag to add censored revisions
1647 # must pass the censored index flag to add censored revisions
1648 if not flags & REVIDX_ISCENSORED:
1648 if not flags & REVIDX_ISCENSORED:
1649 raise
1649 raise
1650 return btext[0]
1650 return btext[0]
1651
1651
1652 def builddelta(rev):
1652 def builddelta(rev):
1653 # can we use the cached delta?
1653 # can we use the cached delta?
1654 if cachedelta and cachedelta[0] == rev:
1654 if cachedelta and cachedelta[0] == rev:
1655 delta = cachedelta[1]
1655 delta = cachedelta[1]
1656 else:
1656 else:
1657 t = buildtext()
1657 t = buildtext()
1658 if self.iscensored(rev):
1658 if self.iscensored(rev):
1659 # deltas based on a censored revision must replace the
1659 # deltas based on a censored revision must replace the
1660 # full content in one patch, so delta works everywhere
1660 # full content in one patch, so delta works everywhere
1661 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1661 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1662 delta = header + t
1662 delta = header + t
1663 else:
1663 else:
1664 if self._inline:
1664 if self._inline:
1665 fh = ifh
1665 fh = ifh
1666 else:
1666 else:
1667 fh = dfh
1667 fh = dfh
1668 ptext = self.revision(rev, _df=fh, raw=True)
1668 ptext = self.revision(rev, _df=fh, raw=True)
1669 delta = mdiff.textdiff(ptext, t)
1669 delta = mdiff.textdiff(ptext, t)
1670 header, data = self.compress(delta)
1670 header, data = self.compress(delta)
1671 deltalen = len(header) + len(data)
1671 deltalen = len(header) + len(data)
1672 chainbase = self.chainbase(rev)
1672 chainbase = self.chainbase(rev)
1673 dist = deltalen + offset - self.start(chainbase)
1673 dist = deltalen + offset - self.start(chainbase)
1674 if self._generaldelta:
1674 if self._generaldelta:
1675 base = rev
1675 base = rev
1676 else:
1676 else:
1677 base = chainbase
1677 base = chainbase
1678 chainlen, compresseddeltalen = self._chaininfo(rev)
1678 chainlen, compresseddeltalen = self._chaininfo(rev)
1679 chainlen += 1
1679 chainlen += 1
1680 compresseddeltalen += deltalen
1680 compresseddeltalen += deltalen
1681 return (dist, deltalen, (header, data), base,
1681 return (dist, deltalen, (header, data), base,
1682 chainbase, chainlen, compresseddeltalen)
1682 chainbase, chainlen, compresseddeltalen)
1683
1683
1684 curr = len(self)
1684 curr = len(self)
1685 prev = curr - 1
1685 prev = curr - 1
1686 offset = self.end(prev)
1686 offset = self.end(prev)
1687 delta = None
1687 delta = None
1688 p1r, p2r = self.rev(p1), self.rev(p2)
1688 p1r, p2r = self.rev(p1), self.rev(p2)
1689
1689
1690 # full versions are inserted when the needed deltas
1690 # full versions are inserted when the needed deltas
1691 # become comparable to the uncompressed text
1691 # become comparable to the uncompressed text
1692 if rawtext is None:
1692 if rawtext is None:
1693 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1693 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1694 cachedelta[1])
1694 cachedelta[1])
1695 else:
1695 else:
1696 textlen = len(rawtext)
1696 textlen = len(rawtext)
1697
1697
1698 # should we try to build a delta?
1698 # should we try to build a delta?
1699 if prev != nullrev and self.storedeltachains:
1699 if prev != nullrev and self.storedeltachains:
1700 tested = set()
1700 tested = set()
1701 # This condition is true most of the time when processing
1701 # This condition is true most of the time when processing
1702 # changegroup data into a generaldelta repo. The only time it
1702 # changegroup data into a generaldelta repo. The only time it
1703 # isn't true is if this is the first revision in a delta chain
1703 # isn't true is if this is the first revision in a delta chain
1704 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
1704 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
1705 if cachedelta and self._generaldelta and self._lazydeltabase:
1705 if cachedelta and self._generaldelta and self._lazydeltabase:
1706 # Assume what we received from the server is a good choice
1706 # Assume what we received from the server is a good choice
1707 # build delta will reuse the cache
1707 # build delta will reuse the cache
1708 candidatedelta = builddelta(cachedelta[0])
1708 candidatedelta = builddelta(cachedelta[0])
1709 tested.add(cachedelta[0])
1709 tested.add(cachedelta[0])
1710 if self._isgooddelta(candidatedelta, textlen):
1710 if self._isgooddelta(candidatedelta, textlen):
1711 delta = candidatedelta
1711 delta = candidatedelta
1712 if delta is None and self._generaldelta:
1712 if delta is None and self._generaldelta:
1713 # exclude already lazy tested base if any
1713 # exclude already lazy tested base if any
1714 parents = [p for p in (p1r, p2r)
1714 parents = [p for p in (p1r, p2r)
1715 if p != nullrev and p not in tested]
1715 if p != nullrev and p not in tested]
1716 if parents and not self._aggressivemergedeltas:
1716 if parents and not self._aggressivemergedeltas:
1717 # Pick whichever parent is closer to us (to minimize the
1717 # Pick whichever parent is closer to us (to minimize the
1718 # chance of having to build a fulltext).
1718 # chance of having to build a fulltext).
1719 parents = [max(parents)]
1719 parents = [max(parents)]
1720 tested.update(parents)
1720 tested.update(parents)
1721 pdeltas = []
1721 pdeltas = []
1722 for p in parents:
1722 for p in parents:
1723 pd = builddelta(p)
1723 pd = builddelta(p)
1724 if self._isgooddelta(pd, textlen):
1724 if self._isgooddelta(pd, textlen):
1725 pdeltas.append(pd)
1725 pdeltas.append(pd)
1726 if pdeltas:
1726 if pdeltas:
1727 delta = min(pdeltas, key=lambda x: x[1])
1727 delta = min(pdeltas, key=lambda x: x[1])
1728 if delta is None and prev not in tested:
1728 if delta is None and prev not in tested:
1729 # other approach failed try against prev to hopefully save us a
1729 # other approach failed try against prev to hopefully save us a
1730 # fulltext.
1730 # fulltext.
1731 candidatedelta = builddelta(prev)
1731 candidatedelta = builddelta(prev)
1732 if self._isgooddelta(candidatedelta, textlen):
1732 if self._isgooddelta(candidatedelta, textlen):
1733 delta = candidatedelta
1733 delta = candidatedelta
1734 if delta is not None:
1734 if delta is not None:
1735 dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
1735 dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
1736 else:
1736 else:
1737 rawtext = buildtext()
1737 rawtext = buildtext()
1738 data = self.compress(rawtext)
1738 data = self.compress(rawtext)
1739 l = len(data[1]) + len(data[0])
1739 l = len(data[1]) + len(data[0])
1740 base = chainbase = curr
1740 base = chainbase = curr
1741
1741
1742 e = (offset_type(offset, flags), l, textlen,
1742 e = (offset_type(offset, flags), l, textlen,
1743 base, link, p1r, p2r, node)
1743 base, link, p1r, p2r, node)
1744 self.index.insert(-1, e)
1744 self.index.insert(-1, e)
1745 self.nodemap[node] = curr
1745 self.nodemap[node] = curr
1746
1746
1747 entry = self._io.packentry(e, self.node, self.version, curr)
1747 entry = self._io.packentry(e, self.node, self.version, curr)
1748 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1748 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1749
1749
1750 if alwayscache and rawtext is None:
1750 if alwayscache and rawtext is None:
1751 rawtext = buildtext()
1751 rawtext = buildtext()
1752
1752
1753 if type(rawtext) == str: # only accept immutable objects
1753 if type(rawtext) == str: # only accept immutable objects
1754 self._cache = (node, curr, rawtext)
1754 self._cache = (node, curr, rawtext)
1755 self._chainbasecache[curr] = chainbase
1755 self._chainbasecache[curr] = chainbase
1756 return node
1756 return node
1757
1757
1758 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1758 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1759 # Files opened in a+ mode have inconsistent behavior on various
1759 # Files opened in a+ mode have inconsistent behavior on various
1760 # platforms. Windows requires that a file positioning call be made
1760 # platforms. Windows requires that a file positioning call be made
1761 # when the file handle transitions between reads and writes. See
1761 # when the file handle transitions between reads and writes. See
1762 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
1762 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
1763 # platforms, Python or the platform itself can be buggy. Some versions
1763 # platforms, Python or the platform itself can be buggy. Some versions
1764 # of Solaris have been observed to not append at the end of the file
1764 # of Solaris have been observed to not append at the end of the file
1765 # if the file was seeked to before the end. See issue4943 for more.
1765 # if the file was seeked to before the end. See issue4943 for more.
1766 #
1766 #
1767 # We work around this issue by inserting a seek() before writing.
1767 # We work around this issue by inserting a seek() before writing.
1768 # Note: This is likely not necessary on Python 3.
1768 # Note: This is likely not necessary on Python 3.
1769 ifh.seek(0, os.SEEK_END)
1769 ifh.seek(0, os.SEEK_END)
1770 if dfh:
1770 if dfh:
1771 dfh.seek(0, os.SEEK_END)
1771 dfh.seek(0, os.SEEK_END)
1772
1772
1773 curr = len(self) - 1
1773 curr = len(self) - 1
1774 if not self._inline:
1774 if not self._inline:
1775 transaction.add(self.datafile, offset)
1775 transaction.add(self.datafile, offset)
1776 transaction.add(self.indexfile, curr * len(entry))
1776 transaction.add(self.indexfile, curr * len(entry))
1777 if data[0]:
1777 if data[0]:
1778 dfh.write(data[0])
1778 dfh.write(data[0])
1779 dfh.write(data[1])
1779 dfh.write(data[1])
1780 ifh.write(entry)
1780 ifh.write(entry)
1781 else:
1781 else:
1782 offset += curr * self._io.size
1782 offset += curr * self._io.size
1783 transaction.add(self.indexfile, offset, curr)
1783 transaction.add(self.indexfile, offset, curr)
1784 ifh.write(entry)
1784 ifh.write(entry)
1785 ifh.write(data[0])
1785 ifh.write(data[0])
1786 ifh.write(data[1])
1786 ifh.write(data[1])
1787 self.checkinlinesize(transaction, ifh)
1787 self.checkinlinesize(transaction, ifh)
1788
1788
1789 def addgroup(self, cg, linkmapper, transaction, addrevisioncb=None):
1789 def addgroup(self, cg, linkmapper, transaction, addrevisioncb=None):
1790 """
1790 """
1791 add a delta group
1791 add a delta group
1792
1792
1793 given a set of deltas, add them to the revision log. the
1793 given a set of deltas, add them to the revision log. the
1794 first delta is against its parent, which should be in our
1794 first delta is against its parent, which should be in our
1795 log, the rest are against the previous delta.
1795 log, the rest are against the previous delta.
1796
1796
1797 If ``addrevisioncb`` is defined, it will be called with arguments of
1797 If ``addrevisioncb`` is defined, it will be called with arguments of
1798 this revlog and the node that was added.
1798 this revlog and the node that was added.
1799 """
1799 """
1800
1800
1801 # track the base of the current delta log
1801 # track the base of the current delta log
1802 content = []
1802 content = []
1803 node = None
1803 node = None
1804
1804
1805 r = len(self)
1805 r = len(self)
1806 end = 0
1806 end = 0
1807 if r:
1807 if r:
1808 end = self.end(r - 1)
1808 end = self.end(r - 1)
1809 ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
1809 ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
1810 isize = r * self._io.size
1810 isize = r * self._io.size
1811 if self._inline:
1811 if self._inline:
1812 transaction.add(self.indexfile, end + isize, r)
1812 transaction.add(self.indexfile, end + isize, r)
1813 dfh = None
1813 dfh = None
1814 else:
1814 else:
1815 transaction.add(self.indexfile, isize, r)
1815 transaction.add(self.indexfile, isize, r)
1816 transaction.add(self.datafile, end)
1816 transaction.add(self.datafile, end)
1817 dfh = self.opener(self.datafile, "a+")
1817 dfh = self.opener(self.datafile, "a+")
1818 def flush():
1818 def flush():
1819 if dfh:
1819 if dfh:
1820 dfh.flush()
1820 dfh.flush()
1821 ifh.flush()
1821 ifh.flush()
1822 try:
1822 try:
1823 # loop through our set of deltas
1823 # loop through our set of deltas
            chain = None
            for chunkdata in iter(lambda: cg.deltachunk(chain), {}):
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']
                flags = chunkdata['flags'] or REVIDX_DEFAULT_FLAGS

                content.append(node)

                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, flags, (baserev, delta),
                                          ifh, dfh,
                                          alwayscache=bool(addrevisioncb))

                if addrevisioncb:
                    addrevisioncb(self, chain)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a+")
                    ifh = self.opener(self.indexfile, "a+",
                                      checkambig=self._checkambig)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return content

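    # Caller sketch (added illustration, not part of the original source):
    # addgroup() is typically driven from changegroup application, where
    # ``cg`` is assumed to be a changegroup unpacker, ``linkmapper`` a
    # function translating changeset nodes to linkrevs, and ``tr`` an open
    # transaction; the callback receives each revlog and added node:
    #
    #     def oncb(revlog, node):
    #         seen.append(node)
    #     added = revlog.addgroup(cg, linkmapper, tr, addrevisioncb=oncb)
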
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        return False

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        return False

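    # Added commentary (not part of the original source): the two methods
    # above are override hooks; the base revlog never reports censored
    # revisions. A subclass such as filelog would override iscensored() to
    # test the censorship flag on the revision, roughly (a sketch, assuming
    # the flag-based filelog behavior):
    #
    #     def iscensored(self, rev):
    #         return self.flags(rev) & REVIDX_ISCENSORED
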
    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.headrevs():
            headlinkrev = self.linkrev(head)
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

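    # Worked example (added illustration, not part of the original source):
    # take revs 0, 1 and 2 with linkrevs 0, 2 and 1, where revs 1 and 2 are
    # both heads with parent rev 0. getstrippoint(2) pops rev 2 first: its
    # linkrev (1) is below minlink, so rev 2 lands in brokenrevs. Popping
    # rev 1 (linkrev 2) then drains futurelargelinkrevs, and the result is
    # (1, {2}): rev 2 must be truncated along with rev 1 even though its
    # own linked changeset survives the strip.
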
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

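    # Offset arithmetic sketch (added illustration, not part of the original
    # source): with the 64-byte v1 index entry size (self._io.size),
    # stripping at rev == 10 in a non-inline revlog truncates the data file
    # at self.start(10) and the index file at 10 * 64 == 640 bytes. In an
    # inline revlog, index entries and revision data share one file, so the
    # two terms are summed into a single truncation offset.
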
    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

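    # Added commentary (not part of the original source): ``dd`` and ``di``
    # are the number of unexpected bytes found in the data file and index
    # file, respectively. (0, 0) means the on-disk sizes match what the
    # index entries predict; anything else suggests truncation or trailing
    # junk, which is how repository verification can surface revlog damage.
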
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    DELTAREUSEALWAYS = 'always'
    DELTAREUSESAMEREVS = 'samerevs'
    DELTAREUSENEVER = 'never'

    DELTAREUSEALL = set(['always', 'samerevs', 'never'])

    def clone(self, tr, destrevlog, addrevisioncb=None,
              deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta
        encoding differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is
          the fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).

        Delta computation can be slow, so the choice of delta reuse policy
        can significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        the two extremes. Deltas will be reused when they are appropriate,
        but when a better delta base revision is available, a new delta will
        be computed. This means that if you are converting a non-generaldelta
        revlog to a generaldelta revlog, deltas will be recomputed if the
        delta's parent isn't a parent of the revision.

        In addition to the delta policy, the ``aggressivemergedeltas``
        argument controls whether to compute deltas against both parents for
        merges. If it is left unset, the destination revlog's existing
        setting is kept.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

        if len(destrevlog):
            raise ValueError(_('destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_('source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_('destination revlog has filtered revisions'))

        # lazydeltabase controls whether to reuse a cached delta, if possible.
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._aggressivemergedeltas

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False

            destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd

            populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                self.DELTAREUSESAMEREVS)

            index = self.index
            for rev in self:
                entry = index[rev]

                # Some classes override linkrev to take filtered revs into
                # account. Use raw entry from index.
                flags = entry[0] & 0xffff
                linkrev = entry[4]
                p1 = index[entry[5]][7]
                p2 = index[entry[6]][7]
                node = entry[7]

                # (Possibly) reuse the delta from the revlog if allowed and
                # the revlog chunk is a delta.
                cachedelta = None
                rawtext = None
                if populatecachedelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, str(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.revision(rev, raw=True)

                ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                        checkambig=False)
                dfh = None
                if not destrevlog._inline:
                    dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                try:
                    destrevlog._addrevision(node, rawtext, tr, linkrev, p1, p2,
                                            flags, cachedelta, ifh, dfh)
                finally:
                    if dfh:
                        dfh.close()
                    ifh.close()

                if addrevisioncb:
                    addrevisioncb(self, rev, node)
        finally:
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._aggressivemergedeltas = oldamd
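
    # Usage sketch (added illustration, not part of the original source): a
    # repack-style pass that recomputes every delta while copying. ``srcvl``,
    # ``destvl`` and ``tr`` are assumed stand-ins for a source revlog, an
    # empty destination revlog and an open transaction:
    #
    #     srcvl.clone(tr, destvl, deltareuse=srcvl.DELTAREUSENEVER,
    #                 aggressivemergedeltas=True)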