revlog: move decompress() from module to revlog class (API)
Gregory Szorc
r30793:b6f455a6 default
@@ -1,1241 +1,1240 @@
# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import functools
import os
import random
import sys
import time
from mercurial import (
    bdiff,
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    mdiff,
    merge,
-    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, attr, _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
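# (from here on, util.safehasattr can be relied upon regardless of hg version)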

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(commands, "formatteropts", [])

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(commands, "debugrevlogopts", [
    ('c', 'changelog', False, ('open changelog')),
    ('m', 'manifest', False, ('open manifest')),
    ('', 'dir', False, ('open directory manifest')),
])

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
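# e.g. a cmdtable key like "^perf|p" (hypothetical) yields ['perf', 'p'];
# the historical '^' prefix marked a command for the short help list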
def parsealiases(cmd):
    return cmd.lstrip("^").split("|")

if safehasattr(cmdutil, 'command'):
    import inspect
    command = cmdutil.command(cmdtable)
    if 'norepo' not in inspect.getargspec(command)[0]:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += ' %s' % ' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += ' %s' % ' '.join(parsealiases(name))
            return func
        return decorator

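# either branch above yields the same decorator interface:
#   @command(name, options, synopsis, norepo=...) registers func in cmdtable
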
def getlen(ui):
    if ui.configbool("perf", "stub"):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, "perf", "presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter('perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, 'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool("perf", "stub"):
        return functools.partial(stub_timer, fm), fm
    return functools.partial(_timer, fm), fm

def stub_timer(fm, func, title=None):
    func()

def _timer(fm, func, title=None):
    results = []
    begin = time.time()
    count = 0
    while True:
        ostart = os.times()
        cstart = time.time()
        r = func()
        cstop = time.time()
        ostop = os.times()
        count += 1
        a, b = ostart, ostop
        results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    fm.startitem()

    if title:
        fm.write('title', '! %s\n', title)
    if r:
        fm.write('result', '! result: %s\n', r)
    m = min(results)
    fm.plain('!')
    fm.write('wall', ' wall %f', m[0])
    fm.write('comb', ' comb %f', m[1] + m[2])
    fm.write('user', ' user %f', m[1])
    fm.write('sys', ' sys %f', m[2])
    fm.write('count', ' (best of %d)', count)
    fm.plain('\n')

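# In effect, _timer keeps sampling until the run has taken >3s with >=100
# samples (or >10s with >=3 samples), then reports the fastest sample to
# minimize scheduler and cache noise. A simplified sketch (hypothetical
# helper names):
#
#   while True:
#       results.append(time_one_run(func))
#       if elapsed > 3 and len(results) >= 100: break
#       if elapsed > 10 and len(results) >= 3: break
#   report(min(results))
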
# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(("%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(("missing attribute %s of %s might break assumption"
                           " of performance measurement") % (name, obj))

    origvalue = getattr(obj, name)
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, name, newvalue)
        def restore(self):
            setattr(obj, name, origvalue)

    return attrutil()

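# Typical usage (hypothetical sketch; gettimer() above uses this pattern
# for ui.fout):
#
#   attr = safeattrsetter(obj, 'name', ignoremissing=True)
#   if attr:
#       attr.set(newvalue)
#       ...
#       attr.restore()
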
# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(("perfbranchmap not available with this Mercurial"),
                      hint="use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(("tags API of this hg command is unknown"))

# perf commands

@command('perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    timer, fm = gettimer(ui, opts)
    try:
        m = scmutil.match(repo[None], pats, {})
        timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
    except Exception:
        try:
            m = scmutil.match(repo[None], pats, {})
            timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
        except Exception:
            timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
    fm.end()

@command('perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    timer, fm = gettimer(ui, opts)
    fc = repo['.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command('perfstatus',
         [('u', 'unknown', False,
           'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
    fm.end()

@command('perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        timer(lambda: scmutil.addremove(repo, matcher, "", dry_run=True))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, 'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, '_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command('perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        clearcaches(cl)
    timer(d)
    fm.end()

@command('perftags', formatteropts)
def perftags(ui, repo, **opts):
    import mercurial.changelog
    import mercurial.manifest
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    repocleartagscache = repocleartagscachefunc(repo)
    def t():
        repo.changelog = mercurial.changelog.changelog(svfs)
        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)
        repocleartagscache()
        return len(repo.tags())
    timer(t)
    fm.end()

@command('perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command('perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command('perfchangegroupchangelog', formatteropts +
         [('', 'version', '02', 'changegroup version'),
          ('r', 'rev', '', 'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    cl = repo.changelog
    revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
    bundler = changegroup.getbundler(version, repo)

    def lookup(node):
        # The real bundler reads the revision in order to access the
        # manifest node and files list. Do that here.
        cl.read(node)
        return node

    def d():
        for chunk in bundler.group(revs, cl, lookup):
            pass

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command('perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    'a' in dirstate
    def d():
        dirstate.dirs()
        del dirstate._dirs
    timer(d)
    fm.end()

@command('perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    "a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        "a" in repo.dirstate
    timer(d)
    fm.end()

@command('perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    "a" in repo.dirstate
    def d():
        "a" in repo.dirstate._dirs
        del repo.dirstate._dirs
    timer(d)
    fm.end()

@command('perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    'a' in dirstate
    def d():
        dirstate._filefoldmap.get('a')
        del dirstate._filefoldmap
    timer(d)
    fm.end()

@command('perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    'a' in dirstate
    def d():
        dirstate._dirfoldmap.get('a')
        del dirstate._dirfoldmap
        del dirstate._dirs
    timer(d)
    fm.end()

@command('perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    "a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

@command('perfmergecalculate',
         [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command('perfpathcopies', [], "REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command('perfmanifest', [], 'REV')
def perfmanifest(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    ctx = scmutil.revsingle(repo, rev, rev)
    t = ctx.manifestnode()
    def d():
        repo.manifestlog.clearcaches()
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

@command('perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    n = repo[rev].node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command('perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    import mercurial.revlog
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo["tip"].node()
    svfs = getsvfs(repo)
    def d():
        cl = mercurial.revlog.revlog(svfs, "00changelog.i")
        cl.rev(n)
    timer(d)
    fm.end()

@command('perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    cmd = sys.argv[0]
    def d():
        if os.name != 'nt':
            os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
        else:
            os.environ['HGRCPATH'] = ''
            os.system("%s version -q > NUL" % cmd)
    timer(d)
    fm.end()

@command('perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, "perf", "parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort("repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

@command('perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command('perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command('perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command('perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command('perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo[rev].node()
    cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command('perflog',
         [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
                               copies=opts.get('rename')))
    ui.popbuffer()
    fm.end()

@command('perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in xrange(len(repo), -1, -1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command('perftemplating', formatteropts)
def perftemplating(ui, repo, rev=None, **opts):
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
                               template='{date|shortdate} [{rev}:{node|short}]'
                               ' {author|person}: {desc|firstline}\n'))
    ui.popbuffer()
    fm.end()

@command('perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command('perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command('perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    lock = repo.lock()
    tr = repo.transaction('perffncachewrite')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command('perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

@command('perfbdiff', revlogopts + formatteropts + [
    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
    ('', 'alldata', False, 'test bdiffs for all associated revisions')],
    '-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    if opts['alldata']:
        opts['changelog'] = True

    if opts.get('changelog') or opts.get('manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('perfbdiff', 'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts['alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
            for pctx in ctx.parents():
                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for pair in textpairs:
            bdiff.bdiff(*pair)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command('perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], '1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
        timer(d, title)
    fm.end()

@command('perfrevlog', revlogopts + formatteropts +
         [('d', 'dist', 100, 'distance between the revisions'),
          ('s', 'startrev', 0, 'revision to start reading at'),
          ('', 'reverse', False, 'read in reverse')],
         '-c|-m|FILE')
def perfrevlog(ui, repo, file_=None, startrev=0, reverse=False, **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    timer, fm = gettimer(ui, opts)
    _len = getlen(ui)

    def d():
        r = cmdutil.openrevlog(repo, 'perfrevlog', file_, opts)

        startrev = 0
        endrev = _len(r)
        dist = opts['dist']

        if reverse:
            startrev, endrev = endrev, startrev
            dist = -1 * dist

        for x in xrange(startrev, endrev, dist):
            r.revision(r.node(x))

    timer(d)
    fm.end()

@command('perfrevlogchunks', revlogopts + formatteropts +
         [('s', 'startrev', 0, 'revision to start at')],
         '-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlog`` and ``perfrevlogrevision``.
    """
877 rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)
876 rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)
878 revs = list(rl.revs(startrev, len(rl) - 1))
877 revs = list(rl.revs(startrev, len(rl) - 1))
879
878
880 def rlfh(rl):
879 def rlfh(rl):
881 if rl._inline:
880 if rl._inline:
882 return getsvfs(repo)(rl.indexfile)
881 return getsvfs(repo)(rl.indexfile)
883 else:
882 else:
884 return getsvfs(repo)(rl.datafile)
883 return getsvfs(repo)(rl.datafile)
885
884
886 def doread():
885 def doread():
887 rl.clearcaches()
886 rl.clearcaches()
888 for rev in revs:
887 for rev in revs:
889 rl._chunkraw(rev, rev)
888 rl._chunkraw(rev, rev)
890
889
891 def doreadcachedfh():
890 def doreadcachedfh():
892 rl.clearcaches()
891 rl.clearcaches()
893 fh = rlfh(rl)
892 fh = rlfh(rl)
894 for rev in revs:
893 for rev in revs:
895 rl._chunkraw(rev, rev, df=fh)
894 rl._chunkraw(rev, rev, df=fh)
896
895
897 def doreadbatch():
896 def doreadbatch():
898 rl.clearcaches()
897 rl.clearcaches()
899 rl._chunkraw(revs[0], revs[-1])
898 rl._chunkraw(revs[0], revs[-1])
900
899
901 def doreadbatchcachedfh():
900 def doreadbatchcachedfh():
902 rl.clearcaches()
901 rl.clearcaches()
903 fh = rlfh(rl)
902 fh = rlfh(rl)
904 rl._chunkraw(revs[0], revs[-1], df=fh)
903 rl._chunkraw(revs[0], revs[-1], df=fh)
905
904
906 def dochunk():
905 def dochunk():
907 rl.clearcaches()
906 rl.clearcaches()
908 fh = rlfh(rl)
907 fh = rlfh(rl)
909 for rev in revs:
908 for rev in revs:
910 rl._chunk(rev, df=fh)
909 rl._chunk(rev, df=fh)
911
910
912 chunks = [None]
911 chunks = [None]
913
912
914 def dochunkbatch():
913 def dochunkbatch():
915 rl.clearcaches()
914 rl.clearcaches()
916 fh = rlfh(rl)
915 fh = rlfh(rl)
917 # Save chunks as a side-effect.
916 # Save chunks as a side-effect.
918 chunks[0] = rl._chunks(revs, df=fh)
917 chunks[0] = rl._chunks(revs, df=fh)
919
918
920 def docompress():
919 def docompress():
921 rl.clearcaches()
920 rl.clearcaches()
922 for chunk in chunks[0]:
921 for chunk in chunks[0]:
923 rl.compress(chunk)
922 rl.compress(chunk)
924
923
925 benches = [
924 benches = [
926 (lambda: doread(), 'read'),
925 (lambda: doread(), 'read'),
927 (lambda: doreadcachedfh(), 'read w/ reused fd'),
926 (lambda: doreadcachedfh(), 'read w/ reused fd'),
928 (lambda: doreadbatch(), 'read batch'),
927 (lambda: doreadbatch(), 'read batch'),
929 (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
928 (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
930 (lambda: dochunk(), 'chunk'),
929 (lambda: dochunk(), 'chunk'),
931 (lambda: dochunkbatch(), 'chunk batch'),
930 (lambda: dochunkbatch(), 'chunk batch'),
932 (lambda: docompress(), 'compress'),
931 (lambda: docompress(), 'compress'),
933 ]
932 ]
934
933
935 for fn, title in benches:
934 for fn, title in benches:
936 timer, fm = gettimer(ui, opts)
935 timer, fm = gettimer(ui, opts)
937 timer(fn, title=title)
936 timer(fn, title=title)
938 fm.end()
937 fm.end()
939
938
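All of the read benchmarks above funnel through revlog._chunkraw(). As a rough
sketch outside the perf harness (assuming an existing ``repo`` object), the
per-revision read loop being timed looks like this::

    import time

    rl = repo.changelog                # any revlog will do
    revs = list(rl.revs(0, len(rl) - 1))
    rl.clearcaches()
    start = time.time()
    for rev in revs:
        rl._chunkraw(rev, rev)         # read one possibly-compressed chunk
    print('read %d chunks in %.3fs' % (len(revs), time.time() - start))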
940 @command('perfrevlogrevision', revlogopts + formatteropts +
939 @command('perfrevlogrevision', revlogopts + formatteropts +
941 [('', 'cache', False, 'use caches instead of clearing')],
940 [('', 'cache', False, 'use caches instead of clearing')],
942 '-c|-m|FILE REV')
941 '-c|-m|FILE REV')
943 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
942 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
944 """Benchmark obtaining a revlog revision.
943 """Benchmark obtaining a revlog revision.
945
944
946 Obtaining a revlog revision consists of roughly the following steps:
945 Obtaining a revlog revision consists of roughly the following steps:
947
946
948 1. Compute the delta chain
947 1. Compute the delta chain
949 2. Obtain the raw chunks for that delta chain
948 2. Obtain the raw chunks for that delta chain
950 3. Decompress each raw chunk
949 3. Decompress each raw chunk
951 4. Apply binary patches to obtain fulltext
950 4. Apply binary patches to obtain fulltext
952 5. Verify hash of fulltext
951 5. Verify hash of fulltext
953
952
954 This command measures the time spent in each of these phases.
953 This command measures the time spent in each of these phases.
955 """
954 """
956 if opts.get('changelog') or opts.get('manifest'):
955 if opts.get('changelog') or opts.get('manifest'):
957 file_, rev = None, file_
956 file_, rev = None, file_
958 elif rev is None:
957 elif rev is None:
959 raise error.CommandError('perfrevlogrevision', 'invalid arguments')
958 raise error.CommandError('perfrevlogrevision', 'invalid arguments')
960
959
961 r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)
960 r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)
962 node = r.lookup(rev)
961 node = r.lookup(rev)
963 rev = r.rev(node)
962 rev = r.rev(node)
964
963
965 def dodeltachain(rev):
964 def dodeltachain(rev):
966 if not cache:
965 if not cache:
967 r.clearcaches()
966 r.clearcaches()
968 r._deltachain(rev)
967 r._deltachain(rev)
969
968
970 def doread(chain):
969 def doread(chain):
971 if not cache:
970 if not cache:
972 r.clearcaches()
971 r.clearcaches()
973 r._chunkraw(chain[0], chain[-1])
972 r._chunkraw(chain[0], chain[-1])
974
973
975 def dodecompress(data, chain):
974 def dodecompress(data, chain):
976 if not cache:
975 if not cache:
977 r.clearcaches()
976 r.clearcaches()
978
977
979 start = r.start
978 start = r.start
980 length = r.length
979 length = r.length
981 inline = r._inline
980 inline = r._inline
982 iosize = r._io.size
981 iosize = r._io.size
983 buffer = util.buffer
982 buffer = util.buffer
984 offset = start(chain[0])
983 offset = start(chain[0])
985
984
986 for rev in chain:
985 for rev in chain:
987 chunkstart = start(rev)
986 chunkstart = start(rev)
988 if inline:
987 if inline:
989 chunkstart += (rev + 1) * iosize
988 chunkstart += (rev + 1) * iosize
990 chunklength = length(rev)
989 chunklength = length(rev)
991 b = buffer(data, chunkstart - offset, chunklength)
990 b = buffer(data, chunkstart - offset, chunklength)
992 revlog.decompress(b)
991 r.decompress(b)
993
992
994 def dopatch(text, bins):
993 def dopatch(text, bins):
995 if not cache:
994 if not cache:
996 r.clearcaches()
995 r.clearcaches()
997 mdiff.patches(text, bins)
996 mdiff.patches(text, bins)
998
997
999 def dohash(text):
998 def dohash(text):
1000 if not cache:
999 if not cache:
1001 r.clearcaches()
1000 r.clearcaches()
1002 r.checkhash(text, node, rev=rev)
1001 r.checkhash(text, node, rev=rev)
1003
1002
1004 def dorevision():
1003 def dorevision():
1005 if not cache:
1004 if not cache:
1006 r.clearcaches()
1005 r.clearcaches()
1007 r.revision(node)
1006 r.revision(node)
1008
1007
1009 chain = r._deltachain(rev)[0]
1008 chain = r._deltachain(rev)[0]
1010 data = r._chunkraw(chain[0], chain[-1])[1]
1009 data = r._chunkraw(chain[0], chain[-1])[1]
1011 bins = r._chunks(chain)
1010 bins = r._chunks(chain)
1012 text = str(bins[0])
1011 text = str(bins[0])
1013 bins = bins[1:]
1012 bins = bins[1:]
1014 text = mdiff.patches(text, bins)
1013 text = mdiff.patches(text, bins)
1015
1014
1016 benches = [
1015 benches = [
1017 (lambda: dorevision(), 'full'),
1016 (lambda: dorevision(), 'full'),
1018 (lambda: dodeltachain(rev), 'deltachain'),
1017 (lambda: dodeltachain(rev), 'deltachain'),
1019 (lambda: doread(chain), 'read'),
1018 (lambda: doread(chain), 'read'),
1020 (lambda: dodecompress(data, chain), 'decompress'),
1019 (lambda: dodecompress(data, chain), 'decompress'),
1021 (lambda: dopatch(text, bins), 'patch'),
1020 (lambda: dopatch(text, bins), 'patch'),
1022 (lambda: dohash(text), 'hash'),
1021 (lambda: dohash(text), 'hash'),
1023 ]
1022 ]
1024
1023
1025 for fn, title in benches:
1024 for fn, title in benches:
1026 timer, fm = gettimer(ui, opts)
1025 timer, fm = gettimer(ui, opts)
1027 timer(fn, title=title)
1026 timer(fn, title=title)
1028 fm.end()
1027 fm.end()
1029
1028
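The five phases correspond one-to-one to the setup code above. As a hedged
sketch (with ``r`` a revlog and ``rev`` a revision number assumed to exist),
reconstructing a single revision by hand amounts to::

    from mercurial import mdiff

    chain = r._deltachain(rev)[0]                 # 1. compute the delta chain
    data = r._chunkraw(chain[0], chain[-1])[1]    # 2. read the raw chunks
    bins = r._chunks(chain)                       # 3. decompress (re-reads data)
    text = mdiff.patches(str(bins[0]), bins[1:])  # 4. apply binary patches
    r.checkhash(text, r.node(rev), rev=rev)       # 5. verify the fulltext hash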
1030 @command('perfrevset',
1029 @command('perfrevset',
1031 [('C', 'clear', False, 'clear volatile cache between each call.'),
1030 [('C', 'clear', False, 'clear volatile cache between each call.'),
1032 ('', 'contexts', False, 'obtain changectx for each revision')]
1031 ('', 'contexts', False, 'obtain changectx for each revision')]
1033 + formatteropts, "REVSET")
1032 + formatteropts, "REVSET")
1034 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
1033 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
1035 """benchmark the execution time of a revset
1034 """benchmark the execution time of a revset
1036
1035
1037 Use the --clear option if you need to evaluate the impact of the volatile
1036 Use the --clear option if you need to evaluate the impact of the volatile
1038 revision set cache on revset execution. The volatile cache holds the
1037 revision set cache on revset execution. The volatile cache holds the
1039 filtering- and obsolescence-related caches."""
1038 filtering- and obsolescence-related caches."""
1040 timer, fm = gettimer(ui, opts)
1039 timer, fm = gettimer(ui, opts)
1041 def d():
1040 def d():
1042 if clear:
1041 if clear:
1043 repo.invalidatevolatilesets()
1042 repo.invalidatevolatilesets()
1044 if contexts:
1043 if contexts:
1045 for ctx in repo.set(expr): pass
1044 for ctx in repo.set(expr): pass
1046 else:
1045 else:
1047 for r in repo.revs(expr): pass
1046 for r in repo.revs(expr): pass
1048 timer(d)
1047 timer(d)
1049 fm.end()
1048 fm.end()
1050
1049
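The --contexts flag changes what each iteration materializes: repo.revs()
yields plain revision numbers, while repo.set() builds a full changectx per
revision, which is noticeably more expensive. A small sketch of the two modes
(``repo`` assumed)::

    for r in repo.revs('heads(all())'):    # integers only
        pass
    for ctx in repo.set('heads(all())'):   # changectx objects
        pass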
1051 @command('perfvolatilesets', formatteropts)
1050 @command('perfvolatilesets', formatteropts)
1052 def perfvolatilesets(ui, repo, *names, **opts):
1051 def perfvolatilesets(ui, repo, *names, **opts):
1053 """benchmark the computation of various volatile set
1052 """benchmark the computation of various volatile set
1054
1053
1055 Volatile set computes element related to filtering and obsolescence."""
1054 Volatile set computes element related to filtering and obsolescence."""
1056 timer, fm = gettimer(ui, opts)
1055 timer, fm = gettimer(ui, opts)
1057 repo = repo.unfiltered()
1056 repo = repo.unfiltered()
1058
1057
1059 def getobs(name):
1058 def getobs(name):
1060 def d():
1059 def d():
1061 repo.invalidatevolatilesets()
1060 repo.invalidatevolatilesets()
1062 obsolete.getrevs(repo, name)
1061 obsolete.getrevs(repo, name)
1063 return d
1062 return d
1064
1063
1065 allobs = sorted(obsolete.cachefuncs)
1064 allobs = sorted(obsolete.cachefuncs)
1066 if names:
1065 if names:
1067 allobs = [n for n in allobs if n in names]
1066 allobs = [n for n in allobs if n in names]
1068
1067
1069 for name in allobs:
1068 for name in allobs:
1070 timer(getobs(name), title=name)
1069 timer(getobs(name), title=name)
1071
1070
1072 def getfiltered(name):
1071 def getfiltered(name):
1073 def d():
1072 def d():
1074 repo.invalidatevolatilesets()
1073 repo.invalidatevolatilesets()
1075 repoview.filterrevs(repo, name)
1074 repoview.filterrevs(repo, name)
1076 return d
1075 return d
1077
1076
1078 allfilter = sorted(repoview.filtertable)
1077 allfilter = sorted(repoview.filtertable)
1079 if names:
1078 if names:
1080 allfilter = [n for n in allfilter if n in names]
1079 allfilter = [n for n in allfilter if n in names]
1081
1080
1082 for name in allfilter:
1081 for name in allfilter:
1083 timer(getfiltered(name), title=name)
1082 timer(getfiltered(name), title=name)
1084 fm.end()
1083 fm.end()
1085
1084
1086 @command('perfbranchmap',
1085 @command('perfbranchmap',
1087 [('f', 'full', False,
1086 [('f', 'full', False,
1088 'Includes build time of subset'),
1087 'Includes build time of subset'),
1089 ] + formatteropts)
1088 ] + formatteropts)
1090 def perfbranchmap(ui, repo, full=False, **opts):
1089 def perfbranchmap(ui, repo, full=False, **opts):
1091 """benchmark the update of a branchmap
1090 """benchmark the update of a branchmap
1092
1091
1093 This benchmarks the full repo.branchmap() call with read and write disabled.
1092 This benchmarks the full repo.branchmap() call with read and write disabled.
1094 """
1093 """
1095 timer, fm = gettimer(ui, opts)
1094 timer, fm = gettimer(ui, opts)
1096 def getbranchmap(filtername):
1095 def getbranchmap(filtername):
1097 """generate a benchmark function for the filtername"""
1096 """generate a benchmark function for the filtername"""
1098 if filtername is None:
1097 if filtername is None:
1099 view = repo
1098 view = repo
1100 else:
1099 else:
1101 view = repo.filtered(filtername)
1100 view = repo.filtered(filtername)
1102 def d():
1101 def d():
1103 if full:
1102 if full:
1104 view._branchcaches.clear()
1103 view._branchcaches.clear()
1105 else:
1104 else:
1106 view._branchcaches.pop(filtername, None)
1105 view._branchcaches.pop(filtername, None)
1107 view.branchmap()
1106 view.branchmap()
1108 return d
1107 return d
1109 # order filters from smaller subsets to bigger subsets
1108 # order filters from smaller subsets to bigger subsets
1110 possiblefilters = set(repoview.filtertable)
1109 possiblefilters = set(repoview.filtertable)
1111 subsettable = getbranchmapsubsettable()
1110 subsettable = getbranchmapsubsettable()
1112 allfilters = []
1111 allfilters = []
1113 while possiblefilters:
1112 while possiblefilters:
1114 for name in possiblefilters:
1113 for name in possiblefilters:
1115 subset = subsettable.get(name)
1114 subset = subsettable.get(name)
1116 if subset not in possiblefilters:
1115 if subset not in possiblefilters:
1117 break
1116 break
1118 else:
1117 else:
1119 assert False, 'subset cycle %s!' % possiblefilters
1118 assert False, 'subset cycle %s!' % possiblefilters
1120 allfilters.append(name)
1119 allfilters.append(name)
1121 possiblefilters.remove(name)
1120 possiblefilters.remove(name)
1122
1121
1123 # warm the cache
1122 # warm the cache
1124 if not full:
1123 if not full:
1125 for name in allfilters:
1124 for name in allfilters:
1126 repo.filtered(name).branchmap()
1125 repo.filtered(name).branchmap()
1127 # add unfiltered
1126 # add unfiltered
1128 allfilters.append(None)
1127 allfilters.append(None)
1129
1128
1130 branchcacheread = safeattrsetter(branchmap, 'read')
1129 branchcacheread = safeattrsetter(branchmap, 'read')
1131 branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
1130 branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
1132 branchcacheread.set(lambda repo: None)
1131 branchcacheread.set(lambda repo: None)
1133 branchcachewrite.set(lambda bc, repo: None)
1132 branchcachewrite.set(lambda bc, repo: None)
1134 try:
1133 try:
1135 for name in allfilters:
1134 for name in allfilters:
1136 timer(getbranchmap(name), title=str(name))
1135 timer(getbranchmap(name), title=str(name))
1137 finally:
1136 finally:
1138 branchcacheread.restore()
1137 branchcacheread.restore()
1139 branchcachewrite.restore()
1138 branchcachewrite.restore()
1140 fm.end()
1139 fm.end()
1141
1140
1142 @command('perfloadmarkers')
1141 @command('perfloadmarkers')
1143 def perfloadmarkers(ui, repo):
1142 def perfloadmarkers(ui, repo):
1144 """benchmark the time to parse the on-disk markers for a repo
1143 """benchmark the time to parse the on-disk markers for a repo
1145
1144
1146 Result is the number of markers in the repo."""
1145 Result is the number of markers in the repo."""
1147 timer, fm = gettimer(ui)
1146 timer, fm = gettimer(ui)
1148 svfs = getsvfs(repo)
1147 svfs = getsvfs(repo)
1149 timer(lambda: len(obsolete.obsstore(svfs)))
1148 timer(lambda: len(obsolete.obsstore(svfs)))
1150 fm.end()
1149 fm.end()
1151
1150
1152 @command('perflrucachedict', formatteropts +
1151 @command('perflrucachedict', formatteropts +
1153 [('', 'size', 4, 'size of cache'),
1152 [('', 'size', 4, 'size of cache'),
1154 ('', 'gets', 10000, 'number of key lookups'),
1153 ('', 'gets', 10000, 'number of key lookups'),
1155 ('', 'sets', 10000, 'number of key sets'),
1154 ('', 'sets', 10000, 'number of key sets'),
1156 ('', 'mixed', 10000, 'number of mixed mode operations'),
1155 ('', 'mixed', 10000, 'number of mixed mode operations'),
1157 ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
1156 ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
1158 norepo=True)
1157 norepo=True)
1159 def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
1158 def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
1160 mixedgetfreq=50, **opts):
1159 mixedgetfreq=50, **opts):
1161 def doinit():
1160 def doinit():
1162 for i in xrange(10000):
1161 for i in xrange(10000):
1163 util.lrucachedict(size)
1162 util.lrucachedict(size)
1164
1163
1165 values = []
1164 values = []
1166 for i in xrange(size):
1165 for i in xrange(size):
1167 values.append(random.randint(0, sys.maxint))
1166 values.append(random.randint(0, sys.maxint))
1168
1167
1169 # Get mode fills the cache and tests raw lookup performance with no
1168 # Get mode fills the cache and tests raw lookup performance with no
1170 # eviction.
1169 # eviction.
1171 getseq = []
1170 getseq = []
1172 for i in xrange(gets):
1171 for i in xrange(gets):
1173 getseq.append(random.choice(values))
1172 getseq.append(random.choice(values))
1174
1173
1175 def dogets():
1174 def dogets():
1176 d = util.lrucachedict(size)
1175 d = util.lrucachedict(size)
1177 for v in values:
1176 for v in values:
1178 d[v] = v
1177 d[v] = v
1179 for key in getseq:
1178 for key in getseq:
1180 value = d[key]
1179 value = d[key]
1181 value # silence pyflakes warning
1180 value # silence pyflakes warning
1182
1181
1183 # Set mode tests insertion speed with cache eviction.
1182 # Set mode tests insertion speed with cache eviction.
1184 setseq = []
1183 setseq = []
1185 for i in xrange(sets):
1184 for i in xrange(sets):
1186 setseq.append(random.randint(0, sys.maxint))
1185 setseq.append(random.randint(0, sys.maxint))
1187
1186
1188 def dosets():
1187 def dosets():
1189 d = util.lrucachedict(size)
1188 d = util.lrucachedict(size)
1190 for v in setseq:
1189 for v in setseq:
1191 d[v] = v
1190 d[v] = v
1192
1191
1193 # Mixed mode randomly performs gets and sets with eviction.
1192 # Mixed mode randomly performs gets and sets with eviction.
1194 mixedops = []
1193 mixedops = []
1195 for i in xrange(mixed):
1194 for i in xrange(mixed):
1196 r = random.randint(0, 100)
1195 r = random.randint(0, 100)
1197 if r < mixedgetfreq:
1196 if r < mixedgetfreq:
1198 op = 0
1197 op = 0
1199 else:
1198 else:
1200 op = 1
1199 op = 1
1201
1200
1202 mixedops.append((op, random.randint(0, size * 2)))
1201 mixedops.append((op, random.randint(0, size * 2)))
1203
1202
1204 def domixed():
1203 def domixed():
1205 d = util.lrucachedict(size)
1204 d = util.lrucachedict(size)
1206
1205
1207 for op, v in mixedops:
1206 for op, v in mixedops:
1208 if op == 0:
1207 if op == 0:
1209 try:
1208 try:
1210 d[v]
1209 d[v]
1211 except KeyError:
1210 except KeyError:
1212 pass
1211 pass
1213 else:
1212 else:
1214 d[v] = v
1213 d[v] = v
1215
1214
1216 benches = [
1215 benches = [
1217 (doinit, 'init'),
1216 (doinit, 'init'),
1218 (dogets, 'gets'),
1217 (dogets, 'gets'),
1219 (dosets, 'sets'),
1218 (dosets, 'sets'),
1220 (domixed, 'mixed')
1219 (domixed, 'mixed')
1221 ]
1220 ]
1222
1221
1223 for fn, title in benches:
1222 for fn, title in benches:
1224 timer, fm = gettimer(ui, opts)
1223 timer, fm = gettimer(ui, opts)
1225 timer(fn, title=title)
1224 timer(fn, title=title)
1226 fm.end()
1225 fm.end()
1227
1226
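All four modes exercise util.lrucachedict, a fixed-size mapping with
least-recently-used eviction. A minimal sketch of the behavior being
measured::

    from mercurial import util

    d = util.lrucachedict(2)
    d['a'] = 1
    d['b'] = 2
    d['a']          # touching 'a' makes 'b' the least recently used
    d['c'] = 3      # inserting past capacity evicts 'b'
    assert 'a' in d and 'b' not in d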
1228 def uisetup(ui):
1227 def uisetup(ui):
1229 if (util.safehasattr(cmdutil, 'openrevlog') and
1228 if (util.safehasattr(cmdutil, 'openrevlog') and
1230 not util.safehasattr(commands, 'debugrevlogopts')):
1229 not util.safehasattr(commands, 'debugrevlogopts')):
1231 # for "historical portability":
1230 # for "historical portability":
1232 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
1231 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
1233 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
1232 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
1234 # openrevlog() should cause failure, because it has been
1233 # openrevlog() should cause failure, because it has been
1235 # available since 3.5 (or 49c583ca48c4).
1234 # available since 3.5 (or 49c583ca48c4).
1236 def openrevlog(orig, repo, cmd, file_, opts):
1235 def openrevlog(orig, repo, cmd, file_, opts):
1237 if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
1236 if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
1238 raise error.Abort("This version doesn't support --dir option",
1237 raise error.Abort("This version doesn't support --dir option",
1239 hint="use 3.5 or later")
1238 hint="use 3.5 or later")
1240 return orig(repo, cmd, file_, opts)
1239 return orig(repo, cmd, file_, opts)
1241 extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)
1240 extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)
@@ -1,2064 +1,2069 @@
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import errno
17 import errno
18 import hashlib
18 import hashlib
19 import os
19 import os
20 import struct
20 import struct
21 import zlib
21 import zlib
22
22
23 # import stuff from node for others to import from revlog
23 # import stuff from node for others to import from revlog
24 from .node import (
24 from .node import (
25 bin,
25 bin,
26 hex,
26 hex,
27 nullid,
27 nullid,
28 nullrev,
28 nullrev,
29 )
29 )
30 from .i18n import _
30 from .i18n import _
31 from . import (
31 from . import (
32 ancestor,
32 ancestor,
33 error,
33 error,
34 mdiff,
34 mdiff,
35 parsers,
35 parsers,
36 templatefilters,
36 templatefilters,
37 util,
37 util,
38 )
38 )
39
39
40 _pack = struct.pack
40 _pack = struct.pack
41 _unpack = struct.unpack
41 _unpack = struct.unpack
42 _compress = zlib.compress
42 _compress = zlib.compress
43 _decompress = zlib.decompress
43 _decompress = zlib.decompress
44
44
45 # revlog header flags
45 # revlog header flags
46 REVLOGV0 = 0
46 REVLOGV0 = 0
47 REVLOGNG = 1
47 REVLOGNG = 1
48 REVLOGNGINLINEDATA = (1 << 16)
48 REVLOGNGINLINEDATA = (1 << 16)
49 REVLOGGENERALDELTA = (1 << 17)
49 REVLOGGENERALDELTA = (1 << 17)
50 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
50 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
51 REVLOG_DEFAULT_FORMAT = REVLOGNG
51 REVLOG_DEFAULT_FORMAT = REVLOGNG
52 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
52 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
53 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
53 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
54
54
55 # revlog index flags
55 # revlog index flags
56 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
56 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
57 REVIDX_EXTSTORED = (1 << 14) # revision data is stored externally
57 REVIDX_EXTSTORED = (1 << 14) # revision data is stored externally
58 REVIDX_DEFAULT_FLAGS = 0
58 REVIDX_DEFAULT_FLAGS = 0
59 # stable order in which flags need to be processed and their processors applied
59 # stable order in which flags need to be processed and their processors applied
60 REVIDX_FLAGS_ORDER = [
60 REVIDX_FLAGS_ORDER = [
61 REVIDX_ISCENSORED,
61 REVIDX_ISCENSORED,
62 REVIDX_EXTSTORED,
62 REVIDX_EXTSTORED,
63 ]
63 ]
64 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
64 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
65
65
66 # max size of revlog with inline data
66 # max size of revlog with inline data
67 _maxinline = 131072
67 _maxinline = 131072
68 _chunksize = 1048576
68 _chunksize = 1048576
69
69
70 RevlogError = error.RevlogError
70 RevlogError = error.RevlogError
71 LookupError = error.LookupError
71 LookupError = error.LookupError
72 CensoredNodeError = error.CensoredNodeError
72 CensoredNodeError = error.CensoredNodeError
73 ProgrammingError = error.ProgrammingError
73 ProgrammingError = error.ProgrammingError
74
74
75 # Store flag processors (cf. 'addflagprocessor()' to register)
75 # Store flag processors (cf. 'addflagprocessor()' to register)
76 _flagprocessors = {
76 _flagprocessors = {
77 REVIDX_ISCENSORED: None,
77 REVIDX_ISCENSORED: None,
78 }
78 }
79
79
80 def addflagprocessor(flag, processor):
80 def addflagprocessor(flag, processor):
81 """Register a flag processor on a revision data flag.
81 """Register a flag processor on a revision data flag.
82
82
83 Invariant:
83 Invariant:
84 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER.
84 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER.
85 - Only one flag processor can be registered on a specific flag.
85 - Only one flag processor can be registered on a specific flag.
86 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
86 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
87 following signatures:
87 following signatures:
88 - (read) f(self, text) -> newtext, bool
88 - (read) f(self, text) -> newtext, bool
89 - (write) f(self, text) -> newtext, bool
89 - (write) f(self, text) -> newtext, bool
90 - (raw) f(self, text) -> bool
90 - (raw) f(self, text) -> bool
91 The boolean returned by these transforms is used to determine whether
91 The boolean returned by these transforms is used to determine whether
92 'newtext' can be used for hash integrity checking.
92 'newtext' can be used for hash integrity checking.
93
93
94 Note: The 'raw' transform is used for changegroup generation and in some
94 Note: The 'raw' transform is used for changegroup generation and in some
95 debug commands. In this case the transform only indicates whether the
95 debug commands. In this case the transform only indicates whether the
96 contents can be used for hash integrity checks.
96 contents can be used for hash integrity checks.
97 """
97 """
98 if not flag & REVIDX_KNOWN_FLAGS:
98 if not flag & REVIDX_KNOWN_FLAGS:
99 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
99 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
100 raise ProgrammingError(msg)
100 raise ProgrammingError(msg)
101 if flag not in REVIDX_FLAGS_ORDER:
101 if flag not in REVIDX_FLAGS_ORDER:
102 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
102 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
103 raise ProgrammingError(msg)
103 raise ProgrammingError(msg)
104 if flag in _flagprocessors:
104 if flag in _flagprocessors:
105 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
105 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
106 raise error.Abort(msg)
106 raise error.Abort(msg)
107 _flagprocessors[flag] = processor
107 _flagprocessors[flag] = processor
108
108
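As a hedged illustration of the registration contract above (the transforms
here are no-op placeholders, not a real externally-stored-revision
implementation), registering a processor for REVIDX_EXTSTORED would look
like::

    def readtransform(self, text):
        return text, True     # (read)  f(self, text) -> newtext, bool
    def writetransform(self, text):
        return text, True     # (write) f(self, text) -> newtext, bool
    def rawtransform(self, text):
        return True           # (raw)   f(self, text) -> bool

    addflagprocessor(REVIDX_EXTSTORED,
                     (readtransform, writetransform, rawtransform))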
109 def getoffset(q):
109 def getoffset(q):
110 return int(q >> 16)
110 return int(q >> 16)
111
111
112 def gettype(q):
112 def gettype(q):
113 return int(q & 0xFFFF)
113 return int(q & 0xFFFF)
114
114
115 def offset_type(offset, type):
115 def offset_type(offset, type):
116 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
116 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
117 raise ValueError('unknown revlog index flags')
117 raise ValueError('unknown revlog index flags')
118 return long(long(offset) << 16 | type)
118 return long(long(offset) << 16 | type)
119
119
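The first field of an index entry packs a 48-bit offset and 16 bits of flags
into a single integer; getoffset() and gettype() undo the packing::

    v = offset_type(1024, REVIDX_ISCENSORED)
    assert getoffset(v) == 1024
    assert gettype(v) == REVIDX_ISCENSORED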
120 _nullhash = hashlib.sha1(nullid)
120 _nullhash = hashlib.sha1(nullid)
121
121
122 def hash(text, p1, p2):
122 def hash(text, p1, p2):
123 """generate a hash from the given text and its parent hashes
123 """generate a hash from the given text and its parent hashes
124
124
125 This hash combines both the current file contents and its history
125 This hash combines both the current file contents and its history
126 in a manner that makes it easy to distinguish nodes with the same
126 in a manner that makes it easy to distinguish nodes with the same
127 content in the revision graph.
127 content in the revision graph.
128 """
128 """
129 # As of now, if one of the parent nodes is null, p2 is null
129 # As of now, if one of the parent nodes is null, p2 is null
130 if p2 == nullid:
130 if p2 == nullid:
131 # deep copy of a hash is faster than creating one
131 # deep copy of a hash is faster than creating one
132 s = _nullhash.copy()
132 s = _nullhash.copy()
133 s.update(p1)
133 s.update(p1)
134 else:
134 else:
135 # none of the parent nodes are nullid
135 # none of the parent nodes are nullid
136 l = [p1, p2]
136 l = [p1, p2]
137 l.sort()
137 l.sort()
138 s = hashlib.sha1(l[0])
138 s = hashlib.sha1(l[0])
139 s.update(l[1])
139 s.update(l[1])
140 s.update(text)
140 s.update(text)
141 return s.digest()
141 return s.digest()
142
142
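The two branches are only an optimization; both compute the same digest,
sha1(min(p1, p2) + max(p1, p2) + text). An equivalent, unoptimized
reformulation::

    import hashlib

    def slowhash(text, p1, p2):
        # sorted() puts nullid first when p2 is null, matching the fast path
        a, b = sorted([p1, p2])
        return hashlib.sha1(a + b + text).digest()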
143 def decompress(bin):
144 """ decompress the given input """
145 if not bin:
146 return bin
147 t = bin[0]
148 if t == '\0':
149 return bin
150 if t == 'x':
151 try:
152 return _decompress(bin)
153 except zlib.error as e:
154 raise RevlogError(_("revlog decompress error: %s") % str(e))
155 if t == 'u':
156 return util.buffer(bin, 1)
157 raise RevlogError(_("unknown compression type %r") % t)
158
159 # index v0:
143 # index v0:
160 # 4 bytes: offset
144 # 4 bytes: offset
161 # 4 bytes: compressed length
145 # 4 bytes: compressed length
162 # 4 bytes: base rev
146 # 4 bytes: base rev
163 # 4 bytes: link rev
147 # 4 bytes: link rev
164 # 20 bytes: parent 1 nodeid
148 # 20 bytes: parent 1 nodeid
165 # 20 bytes: parent 2 nodeid
149 # 20 bytes: parent 2 nodeid
166 # 20 bytes: nodeid
150 # 20 bytes: nodeid
167 indexformatv0 = ">4l20s20s20s"
151 indexformatv0 = ">4l20s20s20s"
168
152
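Each v0 entry is a fixed 76 bytes, exactly the fields listed above::

    import struct
    assert struct.calcsize(indexformatv0) == 76   # 4*4 + 3*20 bytes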
169 class revlogoldio(object):
153 class revlogoldio(object):
170 def __init__(self):
154 def __init__(self):
171 self.size = struct.calcsize(indexformatv0)
155 self.size = struct.calcsize(indexformatv0)
172
156
173 def parseindex(self, data, inline):
157 def parseindex(self, data, inline):
174 s = self.size
158 s = self.size
175 index = []
159 index = []
176 nodemap = {nullid: nullrev}
160 nodemap = {nullid: nullrev}
177 n = off = 0
161 n = off = 0
178 l = len(data)
162 l = len(data)
179 while off + s <= l:
163 while off + s <= l:
180 cur = data[off:off + s]
164 cur = data[off:off + s]
181 off += s
165 off += s
182 e = _unpack(indexformatv0, cur)
166 e = _unpack(indexformatv0, cur)
183 # transform to revlogv1 format
167 # transform to revlogv1 format
184 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
168 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
185 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
169 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
186 index.append(e2)
170 index.append(e2)
187 nodemap[e[6]] = n
171 nodemap[e[6]] = n
188 n += 1
172 n += 1
189
173
190 # add the magic null revision at -1
174 # add the magic null revision at -1
191 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
175 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
192
176
193 return index, nodemap, None
177 return index, nodemap, None
194
178
195 def packentry(self, entry, node, version, rev):
179 def packentry(self, entry, node, version, rev):
196 if gettype(entry[0]):
180 if gettype(entry[0]):
197 raise RevlogError(_("index entry flags need RevlogNG"))
181 raise RevlogError(_("index entry flags need RevlogNG"))
198 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
182 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
199 node(entry[5]), node(entry[6]), entry[7])
183 node(entry[5]), node(entry[6]), entry[7])
200 return _pack(indexformatv0, *e2)
184 return _pack(indexformatv0, *e2)
201
185
202 # index ng:
186 # index ng:
203 # 6 bytes: offset
187 # 6 bytes: offset
204 # 2 bytes: flags
188 # 2 bytes: flags
205 # 4 bytes: compressed length
189 # 4 bytes: compressed length
206 # 4 bytes: uncompressed length
190 # 4 bytes: uncompressed length
207 # 4 bytes: base rev
191 # 4 bytes: base rev
208 # 4 bytes: link rev
192 # 4 bytes: link rev
209 # 4 bytes: parent 1 rev
193 # 4 bytes: parent 1 rev
210 # 4 bytes: parent 2 rev
194 # 4 bytes: parent 2 rev
211 # 32 bytes: nodeid
195 # 32 bytes: nodeid
212 indexformatng = ">Qiiiiii20s12x"
196 indexformatng = ">Qiiiiii20s12x"
213 versionformat = ">I"
197 versionformat = ">I"
214
198
215 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
199 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
216 # signed integer)
200 # signed integer)
217 _maxentrysize = 0x7fffffff
201 _maxentrysize = 0x7fffffff
218
202
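A RevlogNG entry is 64 bytes: the >Q packs the 6-byte offset and 2-byte flags,
six 4-byte ints follow, and the 20-byte nodeid is padded to 32 bytes::

    import struct
    assert struct.calcsize(indexformatng) == 64   # 8 + 6*4 + 20 + 12 bytes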
219 class revlogio(object):
203 class revlogio(object):
220 def __init__(self):
204 def __init__(self):
221 self.size = struct.calcsize(indexformatng)
205 self.size = struct.calcsize(indexformatng)
222
206
223 def parseindex(self, data, inline):
207 def parseindex(self, data, inline):
224 # call the C implementation to parse the index data
208 # call the C implementation to parse the index data
225 index, cache = parsers.parse_index2(data, inline)
209 index, cache = parsers.parse_index2(data, inline)
226 return index, getattr(index, 'nodemap', None), cache
210 return index, getattr(index, 'nodemap', None), cache
227
211
228 def packentry(self, entry, node, version, rev):
212 def packentry(self, entry, node, version, rev):
229 p = _pack(indexformatng, *entry)
213 p = _pack(indexformatng, *entry)
230 if rev == 0:
214 if rev == 0:
231 p = _pack(versionformat, version) + p[4:]
215 p = _pack(versionformat, version) + p[4:]
232 return p
216 return p
233
217
234 class revlog(object):
218 class revlog(object):
235 """
219 """
236 the underlying revision storage object
220 the underlying revision storage object
237
221
238 A revlog consists of two parts, an index and the revision data.
222 A revlog consists of two parts, an index and the revision data.
239
223
240 The index is a file with a fixed record size containing
224 The index is a file with a fixed record size containing
241 information on each revision, including its nodeid (hash), the
225 information on each revision, including its nodeid (hash), the
242 nodeids of its parents, the position and offset of its data within
226 nodeids of its parents, the position and offset of its data within
243 the data file, and the revision it's based on. Finally, each entry
227 the data file, and the revision it's based on. Finally, each entry
244 contains a linkrev entry that can serve as a pointer to external
228 contains a linkrev entry that can serve as a pointer to external
245 data.
229 data.
246
230
247 The revision data itself is a linear collection of data chunks.
231 The revision data itself is a linear collection of data chunks.
248 Each chunk represents a revision and is usually represented as a
232 Each chunk represents a revision and is usually represented as a
249 delta against the previous chunk. To bound lookup time, runs of
233 delta against the previous chunk. To bound lookup time, runs of
250 deltas are limited to about 2 times the length of the original
234 deltas are limited to about 2 times the length of the original
251 version data. This makes retrieval of a version proportional to
235 version data. This makes retrieval of a version proportional to
252 its size, or O(1) relative to the number of revisions.
236 its size, or O(1) relative to the number of revisions.
253
237
254 Both pieces of the revlog are written to in an append-only
238 Both pieces of the revlog are written to in an append-only
255 fashion, which means we never need to rewrite a file to insert or
239 fashion, which means we never need to rewrite a file to insert or
256 remove data, and can use some simple techniques to avoid the need
240 remove data, and can use some simple techniques to avoid the need
257 for locking while reading.
241 for locking while reading.
258
242
259 If checkambig, indexfile is opened with checkambig=True at
243 If checkambig, indexfile is opened with checkambig=True at
260 writing, to avoid file stat ambiguity.
244 writing, to avoid file stat ambiguity.
261 """
245 """
262 def __init__(self, opener, indexfile, checkambig=False):
246 def __init__(self, opener, indexfile, checkambig=False):
263 """
247 """
264 create a revlog object
248 create a revlog object
265
249
266 opener is a function that abstracts the file opening operation
250 opener is a function that abstracts the file opening operation
267 and can be used to implement COW semantics or the like.
251 and can be used to implement COW semantics or the like.
268 """
252 """
269 self.indexfile = indexfile
253 self.indexfile = indexfile
270 self.datafile = indexfile[:-2] + ".d"
254 self.datafile = indexfile[:-2] + ".d"
271 self.opener = opener
255 self.opener = opener
272 # When True, indexfile is opened with checkambig=True at writing, to
256 # When True, indexfile is opened with checkambig=True at writing, to
273 # avoid file stat ambiguity.
257 # avoid file stat ambiguity.
274 self._checkambig = checkambig
258 self._checkambig = checkambig
275 # 3-tuple of (node, rev, text) for a raw revision.
259 # 3-tuple of (node, rev, text) for a raw revision.
276 self._cache = None
260 self._cache = None
277 # Maps rev to chain base rev.
261 # Maps rev to chain base rev.
278 self._chainbasecache = util.lrucachedict(100)
262 self._chainbasecache = util.lrucachedict(100)
279 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
263 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
280 self._chunkcache = (0, '')
264 self._chunkcache = (0, '')
281 # How much data to read and cache into the raw revlog data cache.
265 # How much data to read and cache into the raw revlog data cache.
282 self._chunkcachesize = 65536
266 self._chunkcachesize = 65536
283 self._maxchainlen = None
267 self._maxchainlen = None
284 self._aggressivemergedeltas = False
268 self._aggressivemergedeltas = False
285 self.index = []
269 self.index = []
286 # Mapping of partial identifiers to full nodes.
270 # Mapping of partial identifiers to full nodes.
287 self._pcache = {}
271 self._pcache = {}
288 # Mapping of revision integer to full node.
272 # Mapping of revision integer to full node.
289 self._nodecache = {nullid: nullrev}
273 self._nodecache = {nullid: nullrev}
290 self._nodepos = None
274 self._nodepos = None
291
275
292 v = REVLOG_DEFAULT_VERSION
276 v = REVLOG_DEFAULT_VERSION
293 opts = getattr(opener, 'options', None)
277 opts = getattr(opener, 'options', None)
294 if opts is not None:
278 if opts is not None:
295 if 'revlogv1' in opts:
279 if 'revlogv1' in opts:
296 if 'generaldelta' in opts:
280 if 'generaldelta' in opts:
297 v |= REVLOGGENERALDELTA
281 v |= REVLOGGENERALDELTA
298 else:
282 else:
299 v = 0
283 v = 0
300 if 'chunkcachesize' in opts:
284 if 'chunkcachesize' in opts:
301 self._chunkcachesize = opts['chunkcachesize']
285 self._chunkcachesize = opts['chunkcachesize']
302 if 'maxchainlen' in opts:
286 if 'maxchainlen' in opts:
303 self._maxchainlen = opts['maxchainlen']
287 self._maxchainlen = opts['maxchainlen']
304 if 'aggressivemergedeltas' in opts:
288 if 'aggressivemergedeltas' in opts:
305 self._aggressivemergedeltas = opts['aggressivemergedeltas']
289 self._aggressivemergedeltas = opts['aggressivemergedeltas']
306 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
290 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
307
291
308 if self._chunkcachesize <= 0:
292 if self._chunkcachesize <= 0:
309 raise RevlogError(_('revlog chunk cache size %r is not greater '
293 raise RevlogError(_('revlog chunk cache size %r is not greater '
310 'than 0') % self._chunkcachesize)
294 'than 0') % self._chunkcachesize)
311 elif self._chunkcachesize & (self._chunkcachesize - 1):
295 elif self._chunkcachesize & (self._chunkcachesize - 1):
312 raise RevlogError(_('revlog chunk cache size %r is not a power '
296 raise RevlogError(_('revlog chunk cache size %r is not a power '
313 'of 2') % self._chunkcachesize)
297 'of 2') % self._chunkcachesize)
314
298
315 indexdata = ''
299 indexdata = ''
316 self._initempty = True
300 self._initempty = True
317 try:
301 try:
318 f = self.opener(self.indexfile)
302 f = self.opener(self.indexfile)
319 indexdata = f.read()
303 indexdata = f.read()
320 f.close()
304 f.close()
321 if len(indexdata) > 0:
305 if len(indexdata) > 0:
322 v = struct.unpack(versionformat, indexdata[:4])[0]
306 v = struct.unpack(versionformat, indexdata[:4])[0]
323 self._initempty = False
307 self._initempty = False
324 except IOError as inst:
308 except IOError as inst:
325 if inst.errno != errno.ENOENT:
309 if inst.errno != errno.ENOENT:
326 raise
310 raise
327
311
328 self.version = v
312 self.version = v
329 self._inline = v & REVLOGNGINLINEDATA
313 self._inline = v & REVLOGNGINLINEDATA
330 self._generaldelta = v & REVLOGGENERALDELTA
314 self._generaldelta = v & REVLOGGENERALDELTA
331 flags = v & ~0xFFFF
315 flags = v & ~0xFFFF
332 fmt = v & 0xFFFF
316 fmt = v & 0xFFFF
333 if fmt == REVLOGV0 and flags:
317 if fmt == REVLOGV0 and flags:
334 raise RevlogError(_("index %s unknown flags %#04x for format v0")
318 raise RevlogError(_("index %s unknown flags %#04x for format v0")
335 % (self.indexfile, flags >> 16))
319 % (self.indexfile, flags >> 16))
336 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
320 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
337 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
321 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
338 % (self.indexfile, flags >> 16))
322 % (self.indexfile, flags >> 16))
339 elif fmt > REVLOGNG:
323 elif fmt > REVLOGNG:
340 raise RevlogError(_("index %s unknown format %d")
324 raise RevlogError(_("index %s unknown format %d")
341 % (self.indexfile, fmt))
325 % (self.indexfile, fmt))
342
326
343 self.storedeltachains = True
327 self.storedeltachains = True
344
328
345 self._io = revlogio()
329 self._io = revlogio()
346 if self.version == REVLOGV0:
330 if self.version == REVLOGV0:
347 self._io = revlogoldio()
331 self._io = revlogoldio()
348 try:
332 try:
349 d = self._io.parseindex(indexdata, self._inline)
333 d = self._io.parseindex(indexdata, self._inline)
350 except (ValueError, IndexError):
334 except (ValueError, IndexError):
351 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
335 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
352 self.index, nodemap, self._chunkcache = d
336 self.index, nodemap, self._chunkcache = d
353 if nodemap is not None:
337 if nodemap is not None:
354 self.nodemap = self._nodecache = nodemap
338 self.nodemap = self._nodecache = nodemap
355 if not self._chunkcache:
339 if not self._chunkcache:
356 self._chunkclear()
340 self._chunkclear()
357 # revnum -> (chain-length, sum-delta-length)
341 # revnum -> (chain-length, sum-delta-length)
358 self._chaininfocache = {}
342 self._chaininfocache = {}
359
343
360 def tip(self):
344 def tip(self):
361 return self.node(len(self.index) - 2)
345 return self.node(len(self.index) - 2)
362 def __contains__(self, rev):
346 def __contains__(self, rev):
363 return 0 <= rev < len(self)
347 return 0 <= rev < len(self)
364 def __len__(self):
348 def __len__(self):
365 return len(self.index) - 1
349 return len(self.index) - 1
366 def __iter__(self):
350 def __iter__(self):
367 return iter(xrange(len(self)))
351 return iter(xrange(len(self)))
368 def revs(self, start=0, stop=None):
352 def revs(self, start=0, stop=None):
369 """iterate over all rev in this revlog (from start to stop)"""
353 """iterate over all rev in this revlog (from start to stop)"""
370 step = 1
354 step = 1
371 if stop is not None:
355 if stop is not None:
372 if start > stop:
356 if start > stop:
373 step = -1
357 step = -1
374 stop += step
358 stop += step
375 else:
359 else:
376 stop = len(self)
360 stop = len(self)
377 return xrange(start, stop, step)
361 return xrange(start, stop, step)
378
362
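Both bounds are inclusive and the iteration reverses automatically when
start > stop; for a revlog ``rl`` with at least three revisions::

    list(rl.revs(0, 2))   # [0, 1, 2]
    list(rl.revs(2, 0))   # [2, 1, 0]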
379 @util.propertycache
363 @util.propertycache
380 def nodemap(self):
364 def nodemap(self):
381 self.rev(self.node(0))
365 self.rev(self.node(0))
382 return self._nodecache
366 return self._nodecache
383
367
384 def hasnode(self, node):
368 def hasnode(self, node):
385 try:
369 try:
386 self.rev(node)
370 self.rev(node)
387 return True
371 return True
388 except KeyError:
372 except KeyError:
389 return False
373 return False
390
374
391 def clearcaches(self):
375 def clearcaches(self):
392 self._cache = None
376 self._cache = None
393 self._chainbasecache.clear()
377 self._chainbasecache.clear()
394 self._chunkcache = (0, '')
378 self._chunkcache = (0, '')
395 self._pcache = {}
379 self._pcache = {}
396
380
397 try:
381 try:
398 self._nodecache.clearcaches()
382 self._nodecache.clearcaches()
399 except AttributeError:
383 except AttributeError:
400 self._nodecache = {nullid: nullrev}
384 self._nodecache = {nullid: nullrev}
401 self._nodepos = None
385 self._nodepos = None
402
386
403 def rev(self, node):
387 def rev(self, node):
404 try:
388 try:
405 return self._nodecache[node]
389 return self._nodecache[node]
406 except TypeError:
390 except TypeError:
407 raise
391 raise
408 except RevlogError:
392 except RevlogError:
409 # parsers.c radix tree lookup failed
393 # parsers.c radix tree lookup failed
410 raise LookupError(node, self.indexfile, _('no node'))
394 raise LookupError(node, self.indexfile, _('no node'))
411 except KeyError:
395 except KeyError:
412 # pure python cache lookup failed
396 # pure python cache lookup failed
413 n = self._nodecache
397 n = self._nodecache
414 i = self.index
398 i = self.index
415 p = self._nodepos
399 p = self._nodepos
416 if p is None:
400 if p is None:
417 p = len(i) - 2
401 p = len(i) - 2
418 for r in xrange(p, -1, -1):
402 for r in xrange(p, -1, -1):
419 v = i[r][7]
403 v = i[r][7]
420 n[v] = r
404 n[v] = r
421 if v == node:
405 if v == node:
422 self._nodepos = r - 1
406 self._nodepos = r - 1
423 return r
407 return r
424 raise LookupError(node, self.indexfile, _('no node'))
408 raise LookupError(node, self.indexfile, _('no node'))
425
409
426 # Accessors for index entries.
410 # Accessors for index entries.
427
411
428 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
412 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
429 # are flags.
413 # are flags.
430 def start(self, rev):
414 def start(self, rev):
431 return int(self.index[rev][0] >> 16)
415 return int(self.index[rev][0] >> 16)
432
416
433 def flags(self, rev):
417 def flags(self, rev):
434 return self.index[rev][0] & 0xFFFF
418 return self.index[rev][0] & 0xFFFF
435
419
436 def length(self, rev):
420 def length(self, rev):
437 return self.index[rev][1]
421 return self.index[rev][1]
438
422
439 def rawsize(self, rev):
423 def rawsize(self, rev):
440 """return the length of the uncompressed text for a given revision"""
424 """return the length of the uncompressed text for a given revision"""
441 l = self.index[rev][2]
425 l = self.index[rev][2]
442 if l >= 0:
426 if l >= 0:
443 return l
427 return l
444
428
445 t = self.revision(self.node(rev))
429 t = self.revision(self.node(rev))
446 return len(t)
430 return len(t)
447 size = rawsize
431 size = rawsize
448
432
449 def chainbase(self, rev):
433 def chainbase(self, rev):
450 base = self._chainbasecache.get(rev)
434 base = self._chainbasecache.get(rev)
451 if base is not None:
435 if base is not None:
452 return base
436 return base
453
437
454 index = self.index
438 index = self.index
455 base = index[rev][3]
439 base = index[rev][3]
456 while base != rev:
440 while base != rev:
457 rev = base
441 rev = base
458 base = index[rev][3]
442 base = index[rev][3]
459
443
460 self._chainbasecache[rev] = base
444 self._chainbasecache[rev] = base
461 return base
445 return base
462
446
463 def linkrev(self, rev):
447 def linkrev(self, rev):
464 return self.index[rev][4]
448 return self.index[rev][4]
465
449
466 def parentrevs(self, rev):
450 def parentrevs(self, rev):
467 return self.index[rev][5:7]
451 return self.index[rev][5:7]
468
452
469 def node(self, rev):
453 def node(self, rev):
470 return self.index[rev][7]
454 return self.index[rev][7]
471
455
472 # Derived from index values.
456 # Derived from index values.
473
457
474 def end(self, rev):
458 def end(self, rev):
475 return self.start(rev) + self.length(rev)
459 return self.start(rev) + self.length(rev)
476
460
477 def parents(self, node):
461 def parents(self, node):
478 i = self.index
462 i = self.index
479 d = i[self.rev(node)]
463 d = i[self.rev(node)]
480 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
464 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
481
465
482 def chainlen(self, rev):
466 def chainlen(self, rev):
483 return self._chaininfo(rev)[0]
467 return self._chaininfo(rev)[0]
484
468
485 def _chaininfo(self, rev):
469 def _chaininfo(self, rev):
486 chaininfocache = self._chaininfocache
470 chaininfocache = self._chaininfocache
487 if rev in chaininfocache:
471 if rev in chaininfocache:
488 return chaininfocache[rev]
472 return chaininfocache[rev]
489 index = self.index
473 index = self.index
490 generaldelta = self._generaldelta
474 generaldelta = self._generaldelta
491 iterrev = rev
475 iterrev = rev
492 e = index[iterrev]
476 e = index[iterrev]
493 clen = 0
477 clen = 0
494 compresseddeltalen = 0
478 compresseddeltalen = 0
495 while iterrev != e[3]:
479 while iterrev != e[3]:
496 clen += 1
480 clen += 1
497 compresseddeltalen += e[1]
481 compresseddeltalen += e[1]
498 if generaldelta:
482 if generaldelta:
499 iterrev = e[3]
483 iterrev = e[3]
500 else:
484 else:
501 iterrev -= 1
485 iterrev -= 1
502 if iterrev in chaininfocache:
486 if iterrev in chaininfocache:
503 t = chaininfocache[iterrev]
487 t = chaininfocache[iterrev]
504 clen += t[0]
488 clen += t[0]
505 compresseddeltalen += t[1]
489 compresseddeltalen += t[1]
506 break
490 break
507 e = index[iterrev]
491 e = index[iterrev]
508 else:
492 else:
509 # Add text length of base since decompressing that also takes
493 # Add text length of base since decompressing that also takes
510 # work. For cache hits the length is already included.
494 # work. For cache hits the length is already included.
511 compresseddeltalen += e[1]
495 compresseddeltalen += e[1]
512 r = (clen, compresseddeltalen)
496 r = (clen, compresseddeltalen)
513 chaininfocache[rev] = r
497 chaininfocache[rev] = r
514 return r
498 return r
515
499
516 def _deltachain(self, rev, stoprev=None):
500 def _deltachain(self, rev, stoprev=None):
517 """Obtain the delta chain for a revision.
501 """Obtain the delta chain for a revision.
518
502
519 ``stoprev`` specifies a revision to stop at. If not specified, we
503 ``stoprev`` specifies a revision to stop at. If not specified, we
520 stop at the base of the chain.
504 stop at the base of the chain.
521
505
522 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
506 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
523 revs in ascending order and ``stopped`` is a bool indicating whether
507 revs in ascending order and ``stopped`` is a bool indicating whether
524 ``stoprev`` was hit.
508 ``stoprev`` was hit.
525 """
509 """
526 chain = []
510 chain = []
527
511
528 # Alias to prevent attribute lookup in tight loop.
512 # Alias to prevent attribute lookup in tight loop.
529 index = self.index
513 index = self.index
530 generaldelta = self._generaldelta
514 generaldelta = self._generaldelta
531
515
532 iterrev = rev
516 iterrev = rev
533 e = index[iterrev]
517 e = index[iterrev]
534 while iterrev != e[3] and iterrev != stoprev:
518 while iterrev != e[3] and iterrev != stoprev:
535 chain.append(iterrev)
519 chain.append(iterrev)
536 if generaldelta:
520 if generaldelta:
537 iterrev = e[3]
521 iterrev = e[3]
538 else:
522 else:
539 iterrev -= 1
523 iterrev -= 1
540 e = index[iterrev]
524 e = index[iterrev]
541
525
542 if iterrev == stoprev:
526 if iterrev == stoprev:
543 stopped = True
527 stopped = True
544 else:
528 else:
545 chain.append(iterrev)
529 chain.append(iterrev)
546 stopped = False
530 stopped = False
547
531
548 chain.reverse()
532 chain.reverse()
549 return chain, stopped
533 return chain, stopped
550
534
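The returned chain always ends at the requested revision and, when no stoprev
is given, starts at the revision that stores a full text, which is the same
base chainbase() computes. Sketch (``r`` and ``rev`` assumed)::

    chain, stopped = r._deltachain(rev)
    assert not stopped and chain[-1] == rev
    assert chain[0] == r.chainbase(rev)   # the base rev stores the full text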
551 def ancestors(self, revs, stoprev=0, inclusive=False):
535 def ancestors(self, revs, stoprev=0, inclusive=False):
552 """Generate the ancestors of 'revs' in reverse topological order.
536 """Generate the ancestors of 'revs' in reverse topological order.
553 Does not generate revs lower than stoprev.
537 Does not generate revs lower than stoprev.
554
538
555 See the documentation for ancestor.lazyancestors for more details."""
539 See the documentation for ancestor.lazyancestors for more details."""
556
540
557 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
541 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
558 inclusive=inclusive)
542 inclusive=inclusive)
559
543
560 def descendants(self, revs):
544 def descendants(self, revs):
561 """Generate the descendants of 'revs' in revision order.
545 """Generate the descendants of 'revs' in revision order.
562
546
563 Yield a sequence of revision numbers starting with a child of
547 Yield a sequence of revision numbers starting with a child of
564 some rev in revs, i.e., each revision is *not* considered a
548 some rev in revs, i.e., each revision is *not* considered a
565 descendant of itself. Results are ordered by revision number (a
549 descendant of itself. Results are ordered by revision number (a
566 topological sort)."""
550 topological sort)."""
567 first = min(revs)
551 first = min(revs)
568 if first == nullrev:
552 if first == nullrev:
569 for i in self:
553 for i in self:
570 yield i
554 yield i
571 return
555 return
572
556
573 seen = set(revs)
557 seen = set(revs)
574 for i in self.revs(start=first + 1):
558 for i in self.revs(start=first + 1):
575 for x in self.parentrevs(i):
559 for x in self.parentrevs(i):
576 if x != nullrev and x in seen:
560 if x != nullrev and x in seen:
577 seen.add(i)
561 seen.add(i)
578 yield i
562 yield i
579 break
563 break
580
564
581 def findcommonmissing(self, common=None, heads=None):
565 def findcommonmissing(self, common=None, heads=None):
582 """Return a tuple of the ancestors of common and the ancestors of heads
566 """Return a tuple of the ancestors of common and the ancestors of heads
583 that are not ancestors of common. In revset terminology, we return the
567 that are not ancestors of common. In revset terminology, we return the
584 tuple:
568 tuple:
585
569
586 ::common, (::heads) - (::common)
570 ::common, (::heads) - (::common)
587
571
588 The list is sorted by revision number, meaning it is
572 The list is sorted by revision number, meaning it is
589 topologically sorted.
573 topologically sorted.
590
574
591 'heads' and 'common' are both lists of node IDs. If heads is
575 'heads' and 'common' are both lists of node IDs. If heads is
592 not supplied, uses all of the revlog's heads. If common is not
576 not supplied, uses all of the revlog's heads. If common is not
593 supplied, uses nullid."""
577 supplied, uses nullid."""
594 if common is None:
578 if common is None:
595 common = [nullid]
579 common = [nullid]
596 if heads is None:
580 if heads is None:
597 heads = self.heads()
581 heads = self.heads()
598
582
599 common = [self.rev(n) for n in common]
583 common = [self.rev(n) for n in common]
600 heads = [self.rev(n) for n in heads]
584 heads = [self.rev(n) for n in heads]
601
585
602 # we want the ancestors, but inclusive
586 # we want the ancestors, but inclusive
603 class lazyset(object):
587 class lazyset(object):
604 def __init__(self, lazyvalues):
588 def __init__(self, lazyvalues):
605 self.addedvalues = set()
589 self.addedvalues = set()
606 self.lazyvalues = lazyvalues
590 self.lazyvalues = lazyvalues
607
591
608 def __contains__(self, value):
592 def __contains__(self, value):
609 return value in self.addedvalues or value in self.lazyvalues
593 return value in self.addedvalues or value in self.lazyvalues
610
594
611 def __iter__(self):
595 def __iter__(self):
612 added = self.addedvalues
596 added = self.addedvalues
613 for r in added:
597 for r in added:
614 yield r
598 yield r
615 for r in self.lazyvalues:
599 for r in self.lazyvalues:
616 if r not in added:
600 if r not in added:
617 yield r
601 yield r
618
602
619 def add(self, value):
603 def add(self, value):
620 self.addedvalues.add(value)
604 self.addedvalues.add(value)
621
605
622 def update(self, values):
606 def update(self, values):
623 self.addedvalues.update(values)
607 self.addedvalues.update(values)
624
608
625 has = lazyset(self.ancestors(common))
609 has = lazyset(self.ancestors(common))
626 has.add(nullrev)
610 has.add(nullrev)
627 has.update(common)
611 has.update(common)
628
612
629 # take all ancestors from heads that aren't in has
613 # take all ancestors from heads that aren't in has
630 missing = set()
614 missing = set()
631 visit = collections.deque(r for r in heads if r not in has)
615 visit = collections.deque(r for r in heads if r not in has)
632 while visit:
616 while visit:
633 r = visit.popleft()
617 r = visit.popleft()
634 if r in missing:
618 if r in missing:
635 continue
619 continue
636 else:
620 else:
637 missing.add(r)
621 missing.add(r)
638 for p in self.parentrevs(r):
622 for p in self.parentrevs(r):
639 if p not in has:
623 if p not in has:
640 visit.append(p)
624 visit.append(p)
641 missing = list(missing)
625 missing = list(missing)
642 missing.sort()
626 missing.sort()
643 return has, [self.node(miss) for miss in missing]
627 return has, [self.node(miss) for miss in missing]
644
628
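In revset terms, the second return value matches '(::heads) - (::common)'
evaluated on the changelog. A hedged sketch using the %ln node-list
substitution of repo.revs() (``repo`` assumed to be the owning repository,
with heads and common as lists of binary node IDs)::

    missingrevs = repo.revs('(::%ln) - (::%ln)', heads, common)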
645 def incrementalmissingrevs(self, common=None):
629 def incrementalmissingrevs(self, common=None):
646 """Return an object that can be used to incrementally compute the
630 """Return an object that can be used to incrementally compute the
647 revision numbers of the ancestors of arbitrary sets that are not
631 revision numbers of the ancestors of arbitrary sets that are not
648 ancestors of common. This is an ancestor.incrementalmissingancestors
632 ancestors of common. This is an ancestor.incrementalmissingancestors
649 object.
633 object.
650
634
651 'common' is a list of revision numbers. If common is not supplied, uses
635 'common' is a list of revision numbers. If common is not supplied, uses
652 nullrev.
636 nullrev.
653 """
637 """
654 if common is None:
638 if common is None:
655 common = [nullrev]
639 common = [nullrev]
656
640
657 return ancestor.incrementalmissingancestors(self.parentrevs, common)
641 return ancestor.incrementalmissingancestors(self.parentrevs, common)
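
    # Sketch of the incremental interface (assumes ``rl`` is a revlog and
    # ``c`` a known revision number; see ancestor.incrementalmissingancestors
    # for the authoritative contract):
    #
    #   inc = rl.incrementalmissingrevs(common=[c])
    #   revs = inc.missingancestors(heads)  # ancestors of heads that are
    #                                       # not ancestors of c
    #
    # Repeated missingancestors() calls on the same object reuse the state
    # accumulated so far, which is what makes it "incremental".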

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]
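
    # findmissing/findmissingrevs are the node-ID and revision-number views
    # of the same computation. A minimal sketch, assuming ``rl`` is a revlog
    # and ``n`` a known node ID:
    #
    #   nodes = rl.findmissing(common=[n])             # node IDs
    #   revs = rl.findmissingrevs(common=[rl.rev(n)])  # same set, as revs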

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev: # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents is a
                # descendant. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
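
    # A small sketch (assumes ``rl`` is a revlog and ``r``/``h`` are known
    # node IDs with ``r`` an ancestor of ``h``):
    #
    #   nodes, outroots, outheads = rl.nodesbetween([r], [h])
    #   # nodes[0] == r, nodes[-1] == h, and everything in between is both
    #   # a descendant of r and an ancestor of h, in topological order.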

    def headrevs(self):
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iter over filtered rev so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
        return [r for r, val in enumerate(ishead) if val]
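
    # Why "count + 1" slots: index entries carry parent revisions at e[5]
    # and e[6], and a missing parent is nullrev (-1), which Python indexes
    # as the last element. A sketch of the invariant on a 3-rev linear log:
    #
    #   ishead = [0, 0, 0, 0]   # revs 0..2 plus a scratch slot for nullrev
    #   # after the loop: ishead == [0, 0, 1, 0]; only rev 2 has no child,
    #   # and every nullrev write landed harmlessly in ishead[-1].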

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]
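
    # Usage sketch (assumes ``rl`` is a revlog and ``n`` a known node ID):
    #
    #   rl.heads()          # all childless nodes
    #   rl.heads(start=n)   # only heads that descend from n
    #   rl.heads(stop=[n])  # revisions in stop are treated as childless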

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        if start == nullrev:
            return True
        for i in self.descendants([start]):
            if i == end:
                return True
            elif i > end:
                break
        return False

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.commonancestorsheads(a, b)
        except (AttributeError, OverflowError): # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return map(self.node, ancs)

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        return a in self.commonancestorsheads(a, b)
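
    # Why this works (a reasoning note, not new behavior): if a is an
    # ancestor of b, then a itself is a head of the common-ancestor set of
    # (a, b), so membership in commonancestorsheads(a, b) settles the
    # question without a dedicated graph walk.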

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                return partial
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _addchunk(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.
        """
        if df is not None:
            closehandle = False
        else:
            if self._inline:
                df = self.opener(self.indexfile)
            else:
                df = self.opener(self.datafile)
            closehandle = True

        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
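        # Worked example of the window alignment (cachesize is a power of
        # two; say cachesize = 65536, offset = 70000, length = 1000):
        #   realoffset = 70000 & ~65535 = 65536
        #   reallength = ((70000 + 1000 + 65536) & ~65535) - 65536
        #              = 131072 - 65536 = 65536
        # i.e. one aligned 64k window covering the request and its
        # neighborhood.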
        df.seek(realoffset)
        d = df.read(reallength)
        if closehandle:
            df.close()
        self._addchunk(realoffset, d)
        if offset != realoffset or reallength != length:
            return util.buffer(d, offset - realoffset, length)
        return d

    def _getchunk(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._loadchunk(offset, length, df=df)

    def _chunkraw(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getchunk(start, length, df=df)
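
    # Note on the inline adjustment above: an inline revlog interleaves one
    # fixed-size index entry before each revision's data, so rev r's data
    # starts at start(r) + (r + 1) * entrysize. A sketch with entrysize 64:
    #
    #   rev 0 data begins at start(0) + 64, rev 1 at start(1) + 128, ...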

    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._chunkraw(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        try:
            offset, data = self._chunkraw(revs[0], revs[-1], df=df)
        except OverflowError:
            # issue4215 - we can't cache a run of chunks greater than
            # 2G on Windows
            return [self._chunk(rev, df=df) for rev in revs]

        decomp = self.decompress
        for rev in revs:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return str(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1),
                              self.revision(rev2))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                return self._cache[2]
            cachedrev = self._cache[1]

        # look up what we need to read
        text = None
        if rev is None:
            rev = self.rev(node)

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            text = self._cache[2]

        # drop cache to save memory
        self._cache = None

        bins = self._chunks(chain, df=_df)
        if text is None:
            text = str(bins[0])
            bins = bins[1:]

        text = mdiff.patches(text, bins)

        text, validatehash = self._processflags(text, self.flags(rev), 'read',
                                                raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        self._cache = (node, rev, text)
        return text

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return hash(text, p1, p2)

    def _processflags(self, text, flags, operation, raw=False):
        """Inspect revision data flags and apply transforms defined by
        registered flag processors.

        ``text`` - the revision data to process
        ``flags`` - the revision flags
        ``operation`` - the operation being performed (read or write)
        ``raw`` - an optional argument describing if the raw transform should be
        applied.

        This method processes the flags in the order (or reverse order if
        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
        flag processors registered for present flags. The order of flags defined
        in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.

        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
        processed text and ``validatehash`` is a bool indicating whether the
        returned text should be checked for hash integrity.

        Note: If the ``raw`` argument is set, it has precedence over the
        operation and will only update the value of ``validatehash``.
        """
        if operation not in ('read', 'write'):
            raise ProgrammingError(_("invalid '%s' operation ") % (operation))
        # Check all flags are known.
        if flags & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_("incompatible revision flag '%#x'") %
                              (flags & ~REVIDX_KNOWN_FLAGS))
        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in _flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise RevlogError(message)

                processor = _flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                    validatehash = validatehash and vhash

        return text, validatehash
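
    # Shape of a flag processor entry (a sketch; the names are illustrative,
    # only the tuple layout and return shapes come from the code above):
    #
    #   def readtf(rl, text):  return decoded, True   # (text, validatehash)
    #   def writetf(rl, text): return encoded, True
    #   def rawtf(rl, text):   return True            # validatehash only
    #
    #   processor = (readtf, writetf, rawtf)
    #
    # Each processor is registered in _flagprocessors under one REVIDX_* bit.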

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != self.hash(text, p1, p2):
            revornode = rev
            if revornode is None:
                revornode = templatefilters.short(hex(node))
            raise RevlogError(_("integrity check failed on %s:%s")
                              % (self.indexfile, revornode))

    def checkinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(-2)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r)[1])
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True,
                         checkambig=self._checkambig)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call close, the temp file will never replace the
        # real index
        fp.close()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None, flags=REVIDX_DEFAULT_FLAGS):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use a different hashing method (and override checkhash() in such
            case)
        flags - the known flags to set on the revision
        """
        if link == nullrev:
            raise RevlogError(_("attempted to add linkrev -1 to %s")
                              % self.indexfile)

        if flags:
            node = node or self.hash(text, p1, p2)

        newtext, validatehash = self._processflags(text, flags, 'write')

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if newtext != text:
            cachedelta = None
            text = newtext

        if len(text) > _maxentrysize:
            raise RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(text)))

        node = node or self.hash(text, p1, p2)
        if node in self.nodemap:
            return node

        if validatehash:
            self.checkhash(text, node, p1=p1, p2=p2)

        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a+")
        ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
        try:
            return self._addrevision(node, text, transaction, link, p1, p2,
                                     flags, cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, text):
        """generate a possibly-compressed representation of text"""
        if not text:
            return ("", text)
        l = len(text)
        bin = None
        if l < 44:
            pass
        elif l > 1000000:
            # zlib makes an internal copy, thus doubling memory usage for
            # large files, so let's do this in pieces
            z = zlib.compressobj()
            p = []
            pos = 0
            while pos < l:
                pos2 = pos + 2**20
                p.append(z.compress(text[pos:pos2]))
                pos = pos2
            p.append(z.flush())
            if sum(map(len, p)) < l:
                bin = "".join(p)
        else:
            bin = _compress(text)
        if bin is None or len(bin) >= l:
            if text[0] == '\0':
                return ("", text)
            return ('u', text)
        return ("", bin)
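
    # Return-value convention (a sketch of the cases above):
    #
    #   compress('')      -> ('', '')        # empty stays empty
    #   compress(small)   -> ('u', small)    # not worth compressing; 'u'
    #                                        # marks "uncompressed" (or
    #                                        # ('', small) if it already
    #                                        # starts with '\0')
    #   compress(big)     -> ('', zlibdata)  # zlib output self-identifies
    #                                        # by its leading 'x' byte
    #
    # The empty header is legal because '\0', 'u' and 'x' disambiguate the
    # first byte for decompress() below.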

    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data
        t = data[0]
        if t == '\0':
            return data
        if t == 'x':
            try:
                return _decompress(data)
            except zlib.error as e:
                raise RevlogError(_('revlog decompress error: %s') % str(e))
        if t == 'u':
            return util.buffer(data, 1)
        raise RevlogError(_('unknown compression type %r') % t)
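
    # decompress() lives on the class (rather than at module level, per this
    # changeset) so that subclasses and alternate storage code can override
    # how chunks are decoded. Dispatch sketch, mirroring compress() above:
    #
    #   '\0' -> stored verbatim       (returned as-is)
    #   'x'  -> zlib stream           (zlib output begins with 'x')
    #   'u'  -> uncompressed payload  (header byte stripped)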

    def _isgooddelta(self, d, textlen):
        """Returns True if the given delta is good. Good means that it is within
        the disk span, disk size, and chain length bounds that we know to be
        performant."""
        if d is None:
            return False

        # - 'dist' is the distance from the base revision -- bounding it limits
        #   the amount of I/O we need to do.
        # - 'compresseddeltalen' is the sum of the total size of deltas we need
        #   to apply -- bounding it limits the amount of CPU we consume.
        dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
        if (dist > textlen * 4 or l > textlen or
            compresseddeltalen > textlen * 2 or
            (self._maxchainlen and chainlen > self._maxchainlen)):
            return False

        return True
1530
1535
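    # Worked example (editor's addition): with textlen = 1000 and
    # _maxchainlen unset, a candidate delta is rejected when it would read
    # more than 4000 bytes from disk (dist > textlen * 4), is itself larger
    # than the fulltext (l > textlen), or pushes the chain's cumulative
    # compressed size past 2000 bytes (compresseddeltalen > textlen * 2).
    # In all three cases storing a fulltext is cheaper than keeping the
    # "bad" delta.
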
    def _addrevision(self, node, text, transaction, link, p1, p2, flags,
                     cachedelta, ifh, dfh, alwayscache=False, raw=False):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.
        invariants:
        - text is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        - raw is optional; if set to True, it indicates the revision data is
          to be treated by _processflags() as raw. It is usually set by
          changegroup generation and debug commands.
        """
        btext = [text]
        def buildtext():
            if btext[0] is not None:
                return btext[0]
            baserev = cachedelta[0]
            delta = cachedelta[1]
            # special case deltas which replace entire base; no need to
            # decode base revision. this neatly avoids censored bases, which
            # throw when they're decoded.
            hlen = struct.calcsize(">lll")
            if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
                                                       len(delta) - hlen):
                btext[0] = delta[hlen:]
            else:
                if self._inline:
                    fh = ifh
                else:
                    fh = dfh
                basetext = self.revision(self.node(baserev), _df=fh, raw=raw)
                btext[0] = mdiff.patch(basetext, delta)

            try:
                res = self._processflags(btext[0], flags, 'read', raw=raw)
                btext[0], validatehash = res
                if validatehash:
                    self.checkhash(btext[0], node, p1=p1, p2=p2)
                if flags & REVIDX_ISCENSORED:
                    raise RevlogError(_('node %s is not censored') % node)
            except CensoredNodeError:
                # must pass the censored index flag to add censored revisions
                if not flags & REVIDX_ISCENSORED:
                    raise
            return btext[0]

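        # Editor's sketch of the full-replacement special case above: a
        # delta replacing the whole base is "(start=0, end=baselen, newlen)
        # + newtext", so its 12-byte ">lll" header is predictable from the
        # base and delta sizes alone. Assuming a 5-byte base and the new
        # text 'hello':
        #
        #   import struct
        #   header = struct.pack(">lll", 0, 5, 5)  # mdiff.replacediffheader(5, 5)
        #   delta = header + 'hello'
        #
        # Matching on that header lets buildtext() skip decoding the base.
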
        def builddelta(rev):
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                if self.iscensored(rev):
                    # deltas based on a censored revision must replace the
                    # full content in one patch, so delta works everywhere
                    header = mdiff.replacediffheader(self.rawsize(rev), len(t))
                    delta = header + t
                else:
                    if self._inline:
                        fh = ifh
                    else:
                        fh = dfh
                    ptext = self.revision(self.node(rev), _df=fh)
                    delta = mdiff.textdiff(ptext, t)
            header, data = self.compress(delta)
            deltalen = len(header) + len(data)
            chainbase = self.chainbase(rev)
            dist = deltalen + offset - self.start(chainbase)
            if self._generaldelta:
                base = rev
            else:
                base = chainbase
            chainlen, compresseddeltalen = self._chaininfo(rev)
            chainlen += 1
            compresseddeltalen += deltalen
            return (dist, deltalen, (header, data), base,
                    chainbase, chainlen, compresseddeltalen)

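        # Note (editor's addition): 'dist' above is deltalen + offset -
        # start(chainbase), i.e. the number of bytes between the start of
        # the chain's base revision and the end of the new delta. It is the
        # worst-case span a reader must pull from disk to reconstruct this
        # revision, which is why _isgooddelta() bounds it.
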
        curr = len(self)
        prev = curr - 1
        offset = self.end(prev)
        delta = None
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if text is None:
            textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(text)

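        # Overview of the selection below (editor's summary): candidate
        # bases are tried in order -- (1) the delta base cached from the
        # incoming changegroup, (2) one or both parents when generaldelta
        # is enabled, (3) the previous revision -- and the first candidate
        # passing _isgooddelta() wins; otherwise a fulltext is stored.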
        # should we try to build a delta?
        if prev != nullrev and self.storedeltachains:
            tested = set()
            # This condition is true most of the time when processing
            # changegroup data into a generaldelta repo. The only time it
            # isn't true is if this is the first revision in a delta chain
            # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
            if cachedelta and self._generaldelta and self._lazydeltabase:
                # Assume what we received from the server is a good choice;
                # builddelta will reuse the cache.
                candidatedelta = builddelta(cachedelta[0])
                tested.add(cachedelta[0])
                if self._isgooddelta(candidatedelta, textlen):
                    delta = candidatedelta
            if delta is None and self._generaldelta:
                # exclude an already lazily tested base, if any
                parents = [p for p in (p1r, p2r)
                           if p != nullrev and p not in tested]
                if parents and not self._aggressivemergedeltas:
                    # Pick whichever parent is closer to us (to minimize the
                    # chance of having to build a fulltext).
                    parents = [max(parents)]
                tested.update(parents)
                pdeltas = []
                for p in parents:
                    pd = builddelta(p)
                    if self._isgooddelta(pd, textlen):
                        pdeltas.append(pd)
                if pdeltas:
                    delta = min(pdeltas, key=lambda x: x[1])
            if delta is None and prev not in tested:
                # other approaches failed; try against prev to hopefully
                # save us a fulltext.
                candidatedelta = builddelta(prev)
                if self._isgooddelta(candidatedelta, textlen):
                    delta = candidatedelta
        if delta is not None:
            dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
        else:
            text = buildtext()
            data = self.compress(text)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.insert(-1, e)
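        # insert(-1, e) rather than append() (editor's note): the in-memory
        # index keeps a sentinel entry for the null revision in its last
        # slot, so new entries must land just before it.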
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(transaction, ifh, dfh, entry, data, link, offset)

        if alwayscache and text is None:
            text = buildtext()

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        self._chainbasecache[curr] = chainbase
        return node

    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

    def addgroup(self, cg, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        # track the base of the current delta log
        content = []
        node = None

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a+")
        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()
        try:
            # loop through our set of deltas
            chain = None
            for chunkdata in iter(lambda: cg.deltachunk(chain), {}):
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']
                flags = chunkdata['flags'] or REVIDX_DEFAULT_FLAGS

                content.append(node)

                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in
                    # a single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this
                # case. We're only using addgroup() in the context of
                # changegroup generation so the revision data can always be
                # handled as raw by the flagprocessor.
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, flags, (baserev, delta),
                                          ifh, dfh,
                                          alwayscache=bool(addrevisioncb),
                                          raw=True)

                if addrevisioncb:
                    addrevisioncb(self, chain)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a+")
                    ifh = self.opener(self.indexfile, "a+",
                                      checkambig=self._checkambig)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return content

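    # Hypothetical caller sketch (editor's addition): changegroup
    # application drives addgroup() with a mapping from changelog nodes to
    # local linkrevs. Assuming ``cg`` is a changegroup unpacker positioned
    # at this revlog's delta stream and ``cl`` is the changelog:
    #
    #   def linkmapper(csnode):
    #       return cl.rev(csnode)
    #
    #   added = rl.addgroup(cg, linkmapper, transaction)
    #
    # ``added`` lists every node seen in the stream, including nodes that
    # were already present and therefore skipped.
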
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        return False

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        return False

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.headrevs():
            headlinkrev = self.linkrev(head)
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

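    # Worked example (editor's addition): take a linear filelog whose
    # linkrevs are [0, 2, 5, 7]. getstrippoint(5) pops head rev 3
    # (linkrev 7 >= 5), then its parent rev 2 (linkrev 5 >= 5), and stops
    # once no pending linkrev reaches 5, returning (2, set()): truncate at
    # rev 2, nothing broken. A rev numbered above the strip point but with
    # a linkrev below minlink would instead land in ``brokenrevs``.
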
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)
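        # Offset arithmetic above (editor's note): for a non-inline revlog
        # the data file is cut at start(rev) and the index at rev *
        # entrysize (entries are fixed width). For an inline revlog both
        # live in one file, so the cut is start(rev) + rev * entrysize --
        # the data consumed by surviving revisions plus their index entries.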

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

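    # Editor's note on the return value: ``dd`` is how many bytes the data
    # file differs from the size implied by the index, and ``di`` is the
    # excess in the index file (a partial trailing entry and, for inline
    # revlogs, unaccounted data). ``(0, 0)`` means both files are
    # consistent with the index; anything else is what ``hg verify``
    # reports as a size mismatch.
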
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    DELTAREUSEALWAYS = 'always'
    DELTAREUSESAMEREVS = 'samerevs'
    DELTAREUSENEVER = 'never'

    DELTAREUSEALL = set(['always', 'samerevs', 'never'])

    def clone(self, tr, destrevlog, addrevisioncb=None,
              deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta
        encoding differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can
        have the following values:

        DELTAREUSEALWAYS
           Deltas will always be reused (if possible), even if the
           destination revlog would not select the same revisions for the
           delta. This is the fastest mode of operation.
        DELTAREUSESAMEREVS
           Deltas will be reused if the destination revlog would pick the
           same revisions for the delta. This mode strikes a balance between
           speed and optimization.
        DELTAREUSENEVER
           Deltas will never be reused. This is the slowest mode of
           execution. This mode can be used to recompute deltas (e.g. if the
           diff/delta algorithm changes).

        Delta computation can be slow, so the choice of delta reuse policy
        can significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance
        between two extremes. Deltas will be reused if they are appropriate.
        But if the delta could choose a better revision, it will do so.
        This means if you are converting a non-generaldelta revlog to a
        generaldelta revlog, deltas will be recomputed if the delta's parent
        isn't a parent of the revision.

        In addition to the delta policy, the ``aggressivemergedeltas``
        argument controls whether to compute deltas against both parents
        for merges. If not set, the destination revlog's existing setting
        is used.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

        if len(destrevlog):
            raise ValueError(_('destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_('source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_('destination revlog has filtered revisions'))

        # lazydeltabase controls whether to reuse a cached delta, if possible.
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._aggressivemergedeltas

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False

            destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd

            populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                self.DELTAREUSESAMEREVS)

            index = self.index
            for rev in self:
                entry = index[rev]

                # Some classes override linkrev to take filtered revs into
                # account. Use raw entry from index.
                flags = entry[0] & 0xffff
                linkrev = entry[4]
                p1 = index[entry[5]][7]
                p2 = index[entry[6]][7]
                node = entry[7]

                # (Possibly) reuse the delta from the revlog if allowed and
                # the revlog chunk is a delta.
                cachedelta = None
                text = None
                if populatecachedelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, str(self._chunk(rev)))

                if not cachedelta:
                    text = self.revision(rev)

                ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                        checkambig=False)
                dfh = None
                if not destrevlog._inline:
                    dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                try:
                    destrevlog._addrevision(node, text, tr, linkrev, p1, p2,
                                            flags, cachedelta, ifh, dfh)
                finally:
                    if dfh:
                        dfh.close()
                    ifh.close()

                if addrevisioncb:
                    addrevisioncb(self, rev, node)
        finally:
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._aggressivemergedeltas = oldamd
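    # Hypothetical usage (editor's sketch): upgrading a revlog to
    # generaldelta inside an open transaction ``tr`` might look like
    #
    #   destrevlog = revlog(destopener, rl.indexfile)  # generaldelta opener
    #   rl.clone(tr, destrevlog, deltareuse=rl.DELTAREUSESAMEREVS)
    #
    # with DELTAREUSESAMEREVS letting clone() keep any cached delta whose
    # base the destination would have picked anyway and recompute the rest.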