# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide a range of Mercurial versions
#   as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf commands work correctly with as wide a range of
#   Mercurial versions as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf commands for historical features work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf commands for recent features work correctly with early
#   Mercurial

from __future__ import absolute_import
import functools
import gc
import os
import random
import struct
import sys
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, attr, _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
        ('c', 'changelog', False, ('open changelog')),
        ('m', 'manifest', False, ('open manifest')),
        ('', 'dir', False, ('open directory manifest')),
        ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.lstrip("^").split("|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += ' %s' % ' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += ' %s' % ' '.join(parsealiases(name))
            return func
        return decorator

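# Illustrative note: with the shim above, '@command' behaves uniformly
# across versions. Commands that need no repository pass norepo=True,
# as perflinelogedits does further below:
#
#   @command('perflinelogedits', [...], norepo=True)
#   def perflinelogedits(ui, **opts):
#       ...
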
try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem('perf', 'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem('perf', 'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem('perf', 'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem('perf', 'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

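# Illustrative note: the perf.* items declared above (when the registrar
# API exists) are read further down with plain config accessors, e.g.:
#
#   ui.configbool("perf", "stub", False)
#   getint(ui, "perf", "presleep", 1)
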
def getlen(ui):
    if ui.configbool("perf", "stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of the formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, "perf", "presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter('perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, 'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool("perf", "stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool("perf", "all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm

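# Usage sketch (illustrative): every perf command below follows the same
# pattern:
#
#   timer, fm = gettimer(ui, opts)
#   timer(lambda: expensive_operation())  # _timer() runs this in a loop
#   fm.end()
#
# 'expensive_operation' is a placeholder, not a real function.
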
def stub_timer(fm, func, title=None):
    func()

def _timer(fm, func, title=None, displayall=False):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        ostart = os.times()
        cstart = util.timer()
        r = func()
        cstop = util.timer()
        ostop = os.times()
        count += 1
        a, b = ostart, ostop
        results.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    fm.startitem()

    if title:
        fm.write('title', '! %s\n', title)
    if r:
        fm.write('result', '! result: %s\n', r)
    def display(role, entry):
        prefix = ''
        if role != 'best':
            prefix = '%s.' % role
        fm.plain('!')
        fm.write(prefix + 'wall', ' wall %f', entry[0])
        fm.write(prefix + 'comb', ' comb %f', entry[1] + entry[2])
        fm.write(prefix + 'user', ' user %f', entry[1])
        fm.write(prefix + 'sys', ' sys %f', entry[2])
        fm.write(prefix + 'count', ' (%s of %d)', role, count)
        fm.plain('\n')
    results.sort()
    min_val = results[0]
    display('best', min_val)
    if displayall:
        max_val = results[-1]
        display('max', max_val)
        avg = tuple([sum(x) / count for x in zip(*results)])
        display('avg', avg)
        median = results[len(results) // 2]
        display('median', median)

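# Example output of display() above (timing values invented):
#
#   ! wall 0.003175 comb 0.010000 user 0.010000 sys 0.000000 (best of 100)
#
# With perf.all-timing set, additional 'max', 'avg' and 'median' lines
# are printed in the same format.
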
# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(("%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has the 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute
    at runtime, so that future removal of an attribute which a
    performance measurement relies on is not silently overlooked.

    This function returns an object to (1) assign a new value to, and
    (2) restore the original value of, the attribute.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful to examine
    an attribute which isn't guaranteed to exist in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(("missing attribute %s of %s might break assumption"
                           " of performance measurement") % (name, obj))

    origvalue = getattr(obj, name)
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, name, newvalue)
        def restore(self):
            setattr(obj, name, origvalue)

    return attrutil()

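# Usage sketch (illustrative), mirroring gettimer() above:
#
#   uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
#   if uifout:
#       uifout.set(ui.ferr)   # redirect; .restore() would undo this
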
# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(("perfbranchmap not available with this Mercurial"),
                      hint="use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(("tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(repo, attrname):
    unfi = repo.unfiltered()
    if attrname in vars(unfi):
        delattr(unfi, attrname)
    unfi._filecache.pop(attrname, None)

# perf commands

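# A minimal perf command, for orientation (illustrative only; not a real
# command in this file):
#
#   @command('perfexample', formatteropts)
#   def perfexample(ui, repo, **opts):
#       timer, fm = gettimer(ui, opts)
#       timer(lambda: len(repo.changelog))
#       fm.end()
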
@command('perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command('perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    timer, fm = gettimer(ui, opts)
    fc = repo['.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command('perfstatus',
         [('u', 'unknown', False,
           'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
    fm.end()

@command('perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts['dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, "", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, 'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, '_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command('perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        clearcaches(cl)
    timer(d)
    fm.end()

@command('perftags', formatteropts)
def perftags(ui, repo, **opts):
    import mercurial.changelog
    import mercurial.manifest
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    repocleartagscache = repocleartagscachefunc(repo)
    def t():
        repo.changelog = mercurial.changelog.changelog(svfs)
        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)
        repocleartagscache()
        return len(repo.tags())
    timer(t)
    fm.end()

@command('perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command('perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command('perfbookmarks', formatteropts)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    timer, fm = gettimer(ui, opts)
    def d():
        clearfilecache(repo, '_bookmarks')
        repo._bookmarks
    timer(d)
    fm.end()

@command('perfbundleread', formatteropts, 'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    def makebench(fn):
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, 'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, 'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), 'read(8k)'),
        (makestdioread(16384), 'read(16k)'),
        (makestdioread(32768), 'read(32k)'),
        (makestdioread(131072), 'read(128k)'),
    ]

    with open(bundlepath, 'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), 'cg1 deltaiter()'),
                (makebench(iterchunks), 'cg1 getchunks()'),
                (makereadnbytes(8192), 'cg1 read(8k)'),
                (makereadnbytes(16384), 'cg1 read(16k)'),
                (makereadnbytes(32768), 'cg1 read(32k)'),
                (makereadnbytes(131072), 'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), 'bundle2 forwardchunks()'),
                (makebench(iterparts), 'bundle2 iterparts()'),
                (makebench(iterpartsseekable), 'bundle2 iterparts() seekable'),
                (makebench(seek), 'bundle2 part seek()'),
                (makepartreadnbytes(8192), 'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), 'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), 'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), 'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort('stream clone bundles not supported')
        else:
            raise error.Abort('unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

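# Example invocation (illustrative): create a bundle, then benchmark
# reading it back:
#
#   $ hg bundle --all all.hg
#   $ hg perfbundleread all.hg
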
@command('perfchangegroupchangelog', formatteropts +
         [('', 'version', '02', 'changegroup version'),
          ('r', 'rev', '', 'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
    bundler = changegroup.getbundler(version, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({('progress', 'disable'): True}):
        timer(d)

    fm.end()

@command('perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    'a' in dirstate
    def d():
        dirstate.hasdir('a')
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command('perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    "a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        "a" in repo.dirstate
    timer(d)
    fm.end()

@command('perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    "a" in repo.dirstate
    def d():
        repo.dirstate.hasdir("a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command('perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get('a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command('perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get('a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command('perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    "a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

@command('perfmergecalculate',
         [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command('perfpathcopies', [], "REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command('perfphases',
         [('', 'full', False, 'include file reading time too'),
          ], "")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get('full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, '_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

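# Example invocations (illustrative):
#
#   $ hg perfphases         # phaseset computation from in-memory data
#   $ hg perfphases --full  # also include the file reading time
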
@command('perfphasesremote',
         [], "[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(('default repository not configured!'),
                          hint=("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get('branch') or [])
    ui.status(('analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand('listkeys',
                                     {'namespace': 'phases'}).result()
    del other
    publishing = remotephases.get('publishing', False)
    if publishing:
        ui.status(('publishing: yes\n'))
    else:
        ui.status(('publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == 'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status(('number of roots: %d\n') % len(remotephases))
    ui.status(('number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()

793 @command('perfmanifest',[
850 @command('perfmanifest',[
794 ('m', 'manifest-rev', False, 'Look up a manifest node revision'),
851 ('m', 'manifest-rev', False, 'Look up a manifest node revision'),
795 ('', 'clear-disk', False, 'clear on-disk caches too'),
852 ('', 'clear-disk', False, 'clear on-disk caches too'),
796 ], 'REV|NODE')
853 ], 'REV|NODE')
797 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
854 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
798 """benchmark the time to read a manifest from disk and return a usable
855 """benchmark the time to read a manifest from disk and return a usable
799 dict-like object
856 dict-like object
800
857
801 Manifest caches are cleared before retrieval."""
858 Manifest caches are cleared before retrieval."""
802 timer, fm = gettimer(ui, opts)
859 timer, fm = gettimer(ui, opts)
803 if not manifest_rev:
860 if not manifest_rev:
804 ctx = scmutil.revsingle(repo, rev, rev)
861 ctx = scmutil.revsingle(repo, rev, rev)
805 t = ctx.manifestnode()
862 t = ctx.manifestnode()
806 else:
863 else:
807 t = repo.manifestlog._revlog.lookup(rev)
864 t = repo.manifestlog._revlog.lookup(rev)
808 def d():
865 def d():
809 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
866 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
810 repo.manifestlog[t].read()
867 repo.manifestlog[t].read()
811 timer(d)
868 timer(d)
812 fm.end()
869 fm.end()
813
870
814 @command('perfchangeset', formatteropts)
871 @command('perfchangeset', formatteropts)
815 def perfchangeset(ui, repo, rev, **opts):
872 def perfchangeset(ui, repo, rev, **opts):
816 timer, fm = gettimer(ui, opts)
873 timer, fm = gettimer(ui, opts)
817 n = scmutil.revsingle(repo, rev).node()
874 n = scmutil.revsingle(repo, rev).node()
818 def d():
875 def d():
819 repo.changelog.read(n)
876 repo.changelog.read(n)
820 #repo.changelog._cache = None
877 #repo.changelog._cache = None
821 timer(d)
878 timer(d)
822 fm.end()
879 fm.end()
823
880
824 @command('perfindex', formatteropts)
881 @command('perfindex', formatteropts)
825 def perfindex(ui, repo, **opts):
882 def perfindex(ui, repo, **opts):
826 import mercurial.revlog
883 import mercurial.revlog
827 timer, fm = gettimer(ui, opts)
884 timer, fm = gettimer(ui, opts)
828 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
885 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
829 n = repo["tip"].node()
886 n = repo["tip"].node()
830 svfs = getsvfs(repo)
887 svfs = getsvfs(repo)
831 def d():
888 def d():
832 cl = mercurial.revlog.revlog(svfs, "00changelog.i")
889 cl = mercurial.revlog.revlog(svfs, "00changelog.i")
833 cl.rev(n)
890 cl.rev(n)
834 timer(d)
891 timer(d)
835 fm.end()
892 fm.end()
836
893
837 @command('perfstartup', formatteropts)
894 @command('perfstartup', formatteropts)
838 def perfstartup(ui, repo, **opts):
895 def perfstartup(ui, repo, **opts):
839 timer, fm = gettimer(ui, opts)
896 timer, fm = gettimer(ui, opts)
840 cmd = sys.argv[0]
897 cmd = sys.argv[0]
841 def d():
898 def d():
842 if os.name != 'nt':
899 if os.name != 'nt':
843 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
900 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
844 else:
901 else:
845 os.environ['HGRCPATH'] = ' '
902 os.environ['HGRCPATH'] = ' '
846 os.system("%s version -q > NUL" % cmd)
903 os.system("%s version -q > NUL" % cmd)
847 timer(d)
904 timer(d)
848 fm.end()
905 fm.end()
849
906
850 @command('perfparents', formatteropts)
907 @command('perfparents', formatteropts)
851 def perfparents(ui, repo, **opts):
908 def perfparents(ui, repo, **opts):
852 timer, fm = gettimer(ui, opts)
909 timer, fm = gettimer(ui, opts)
853 # control the number of commits perfparents iterates over
910 # control the number of commits perfparents iterates over
854 # experimental config: perf.parentscount
911 # experimental config: perf.parentscount
855 count = getint(ui, "perf", "parentscount", 1000)
912 count = getint(ui, "perf", "parentscount", 1000)
856 if len(repo.changelog) < count:
913 if len(repo.changelog) < count:
857 raise error.Abort("repo needs %d commits for this test" % count)
914 raise error.Abort("repo needs %d commits for this test" % count)
858 repo = repo.unfiltered()
915 repo = repo.unfiltered()
859 nl = [repo.changelog.node(i) for i in xrange(count)]
916 nl = [repo.changelog.node(i) for i in xrange(count)]
860 def d():
917 def d():
861 for n in nl:
918 for n in nl:
862 repo.changelog.parents(n)
919 repo.changelog.parents(n)
863 timer(d)
920 timer(d)
864 fm.end()
921 fm.end()
865
922
866 @command('perfctxfiles', formatteropts)
923 @command('perfctxfiles', formatteropts)
867 def perfctxfiles(ui, repo, x, **opts):
924 def perfctxfiles(ui, repo, x, **opts):
868 x = int(x)
925 x = int(x)
869 timer, fm = gettimer(ui, opts)
926 timer, fm = gettimer(ui, opts)
870 def d():
927 def d():
871 len(repo[x].files())
928 len(repo[x].files())
872 timer(d)
929 timer(d)
873 fm.end()
930 fm.end()
874
931
875 @command('perfrawfiles', formatteropts)
932 @command('perfrawfiles', formatteropts)
876 def perfrawfiles(ui, repo, x, **opts):
933 def perfrawfiles(ui, repo, x, **opts):
877 x = int(x)
934 x = int(x)
878 timer, fm = gettimer(ui, opts)
935 timer, fm = gettimer(ui, opts)
879 cl = repo.changelog
936 cl = repo.changelog
880 def d():
937 def d():
881 len(cl.read(x)[3])
938 len(cl.read(x)[3])
882 timer(d)
939 timer(d)
883 fm.end()
940 fm.end()
884
941
885 @command('perflookup', formatteropts)
942 @command('perflookup', formatteropts)
886 def perflookup(ui, repo, rev, **opts):
943 def perflookup(ui, repo, rev, **opts):
887 timer, fm = gettimer(ui, opts)
944 timer, fm = gettimer(ui, opts)
888 timer(lambda: len(repo.lookup(rev)))
945 timer(lambda: len(repo.lookup(rev)))
889 fm.end()
946 fm.end()
890
947
@command('perflinelogedits',
         [('n', 'edits', 10000, 'number of edits'),
          ('', 'max-hunk-lines', 10, 'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    edits = opts['edits']
    maxhunklines = opts['max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

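# For example, to time a larger synthetic edit history made of smaller hunks
# (the values are illustrative; no repository is needed):
#
#   $ hg perflinelogedits -n 50000 --max-hunk-lines 4
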
@command('perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command('perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command('perflog',
         [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
                               copies=opts.get('rename')))
    ui.popbuffer()
    fm.end()

@command('perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command('perftemplating',
         [('r', 'rev', [], 'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(("perftemplating not available with this Mercurial"),
                          hint="use 4.3 or later")

    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get('rev')
    if not revs:
        revs = ['all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = ('{date|shortdate} [{rev}:{node|short}]'
                       ' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()

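# Example: render the default template over a bounded revset, or time a
# custom template (both the revset and the template are illustrative):
#
#   $ hg perftemplating -r 'last(all(), 1000)'
#   $ hg perftemplating -r tip '{rev}:{node|short}\n'
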
@command('perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command('perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command('perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction('perffncachewrite')
    tr.addbackup('fncache')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command('perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()

@command('perfbdiff', revlogopts + formatteropts + [
    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
    ('', 'alldata', False, 'test bdiffs for all associated revisions'),
    ('', 'threads', 0, 'number of threads to use (disable with 0)'),
    ('', 'blocks', False, 'test computing diffs into blocks'),
    ('', 'xdiff', False, 'use xdiff algorithm'),
    ],
    '-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = pycompat.byteskwargs(opts)

    if opts['xdiff'] and not opts['blocks']:
        raise error.CommandError('perfbdiff', '--xdiff requires --blocks')

    if opts['alldata']:
        opts['changelog'] = True

    if opts.get('changelog') or opts.get('manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('perfbdiff', 'invalid arguments')

    blocks = opts['blocks']
    xdiff = opts['xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts['alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
            for pctx in ctx.parents():
                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()

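# Example invocations (revision numbers are illustrative):
#
#   $ hg perfbdiff -m 10000                      # one manifest bdiff
#   $ hg perfbdiff -m 10000 --count 100          # 100 consecutive revisions
#   $ hg perfbdiff --alldata 10000 --threads 4   # changeset data, threaded
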
@command('perfunidiff', revlogopts + formatteropts + [
    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
    ('', 'alldata', False, 'test unidiffs for all associated revisions'),
    ], '-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    if opts['alldata']:
        opts['changelog'] = True

    if opts.get('changelog') or opts.get('manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('perfunidiff', 'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, 'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts['alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
            for pctx in ctx.parents():
                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, '', right, '', 'left', 'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

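# For example, to benchmark unified diffs for everything a changeset touched
# (the revision number is illustrative):
#
#   $ hg perfunidiff --alldata 10000 --count 10
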
@command('perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], '1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
        timer(d, title)
    fm.end()

@command('perfrevlogindex', revlogopts + formatteropts,
         '-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    rl = cmdutil.openrevlog(repo, 'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack('>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(('unsupported revlog version: %d') % version)

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, 'revlog constructor'),
        (read, 'read'),
        (parseindex, 'create index object'),
        (lambda: getentry(0), 'retrieve index entry for rev 0'),
        (lambda: resolvenode('a' * 20), 'look up missing node'),
        (lambda: resolvenode(node0), 'look up node at rev 0'),
        (lambda: resolvenode(node25), 'look up node at 1/4 len'),
        (lambda: resolvenode(node50), 'look up node at 1/2 len'),
        (lambda: resolvenode(node75), 'look up node at 3/4 len'),
        (lambda: resolvenode(node100), 'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

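# Example: run the index benchmarks against the changelog or the manifest
# (the -c/-m flags come from the shared revlog options):
#
#   $ hg perfrevlogindex -c
#   $ hg perfrevlogindex -m
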
@command('perfrevlogrevisions', revlogopts + formatteropts +
         [('d', 'dist', 100, 'distance between the revisions'),
          ('s', 'startrev', 0, 'revision to start reading at'),
          ('', 'reverse', False, 'read in reverse')],
         '-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    rl = cmdutil.openrevlog(repo, 'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts['dist']

        if reverse:
            beginrev, endrev = endrev, beginrev
            dist = -1 * dist

        for x in xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

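# Example: read every 100th manifest revision, forward and then in reverse
# (the distance is illustrative):
#
#   $ hg perfrevlogrevisions -m -d 100
#   $ hg perfrevlogrevisions -m -d 100 --reverse
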
@command('perfrevlogchunks', revlogopts + formatteropts +
         [('e', 'engines', '', 'compression engines to use'),
          ('s', 'startrev', 0, 'revision to start at')],
         '-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression
    performance. For measurements of higher-level operations like resolving
    revisions, see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort('unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress('dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), 'read'),
        (lambda: doreadcachedfh(), 'read w/ reused fd'),
        (lambda: doreadbatch(), 'read batch'),
        (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
        (lambda: dochunk(), 'chunk'),
        (lambda: dochunkbatch(), 'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

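# Example: benchmark changelog chunk operations and compare compression
# engines. Engine availability depends on how Mercurial was built; 'zlib'
# and 'none' should always be present:
#
#   $ hg perfrevlogchunks -c -e 'zlib,none'
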
@command('perfrevlogrevision', revlogopts + formatteropts +
         [('', 'cache', False, 'use caches instead of clearing')],
         '-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Obtain the raw chunks for that delta chain
    3. Decompress each raw chunk
    4. Apply binary patches to obtain fulltext
    5. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    if opts.get('changelog') or opts.get('manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('perfrevlogrevision', 'invalid arguments')

    r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer
        offset = start(chain[0])

        chunks = []
        ladd = chunks.append

        for rev in chain:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(buffer(data, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        segmentforrevs(chain[0], chain[-1])

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    chain = r._deltachain(rev)[0]
    data = segmentforrevs(chain[0], chain[-1])[1]
    rawchunks = getrawchunks(data, chain)
    bins = r._chunks(chain)
    text = str(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), 'full'),
        (lambda: dodeltachain(rev), 'deltachain'),
        (lambda: doread(chain), 'read'),
        (lambda: dorawchunks(data, chain), 'rawchunks'),
        (lambda: dodecompress(rawchunks), 'decompress'),
        (lambda: dopatch(text, bins), 'patch'),
        (lambda: dohash(text), 'hash'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

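# Example: break down the cost of reconstructing one manifest revision, with
# cold caches and then with warm ones (the revision number is illustrative):
#
#   $ hg perfrevlogrevision -m 10000
#   $ hg perfrevlogrevision -m 10000 --cache
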
@command('perfrevset',
         [('C', 'clear', False, 'clear volatile cache between each call.'),
          ('', 'contexts', False, 'obtain changectx for each revision')]
         + formatteropts, "REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on revset execution. The volatile caches
    hold data related to filtering and obsolescence."""
    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass
    timer(d)
    fm.end()

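# Example: time a revset, materializing contexts and dropping the volatile
# caches between runs (the revset expression is illustrative):
#
#   $ hg perfrevset --clear --contexts 'draft()'
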
@command('perfvolatilesets',
         [('', 'clear-obsstore', False, 'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence."""
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts['clear_obsstore']:
                clearfilecache(repo, 'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts['clear_obsstore']:
                clearfilecache(repo, 'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()

@command('perfbranchmap',
         [('f', 'full', False,
           'Includes build time of subset'),
          ('', 'clear-revbranch', False,
           'purge the revbranch cache between computations'),
          ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write
    disabled.
    """
    full = opts.get("full", False)
    clear_revbranch = opts.get("clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, 'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or 'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    branchcacheread = safeattrsetter(branchmap, 'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = 'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()

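# Example: benchmark the branchmap update for specific repoview filters;
# the names come from repoview.filtertable (e.g. 'visible', 'served'):
#
#   $ hg perfbranchmap visible served
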
@command('perfbranchmapload', [
    ('f', 'filter', '', 'Specify repoview filter'),
    ('', 'list', False, 'List branchmap filter caches'),
    ] + formatteropts)
def perfbranchmapread(ui, repo, filter='', list=False, **opts):
    """benchmark reading the branchmap"""
    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith('branch2'):
                filtername = name.partition('-')[2] or 'unfiltered'
                ui.status('%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if filter:
        repo = repoview.repoview(repo, filter)
    else:
        repo = repo.unfiltered()
    # try once without timer, the filter may not be cached
    if branchmap.read(repo) is None:
        raise error.Abort('No branchmap cached for %s repo'
                          % (filter or 'unfiltered'))
    timer, fm = gettimer(ui, opts)
    timer(lambda: branchmap.read(repo) and None)
    fm.end()

@command('perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()

1789 @command('perflrucachedict', formatteropts +
1846 @command('perflrucachedict', formatteropts +
1790 [('', 'size', 4, 'size of cache'),
1847 [('', 'size', 4, 'size of cache'),
1791 ('', 'gets', 10000, 'number of key lookups'),
1848 ('', 'gets', 10000, 'number of key lookups'),
1792 ('', 'sets', 10000, 'number of key sets'),
1849 ('', 'sets', 10000, 'number of key sets'),
1793 ('', 'mixed', 10000, 'number of mixed mode operations'),
1850 ('', 'mixed', 10000, 'number of mixed mode operations'),
1794 ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
1851 ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
1795 norepo=True)
1852 norepo=True)
1796 def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
1853 def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
1797 mixedgetfreq=50, **opts):
1854 mixedgetfreq=50, **opts):
1798 def doinit():
1855 def doinit():
1799 for i in xrange(10000):
1856 for i in xrange(10000):
1800 util.lrucachedict(size)
1857 util.lrucachedict(size)
1801
1858
1802 values = []
1859 values = []
1803 for i in xrange(size):
1860 for i in xrange(size):
1804 values.append(random.randint(0, sys.maxint))
1861 values.append(random.randint(0, sys.maxint))
1805
1862
1806 # Get mode fills the cache and tests raw lookup performance with no
1863 # Get mode fills the cache and tests raw lookup performance with no
1807 # eviction.
1864 # eviction.
1808 getseq = []
1865 getseq = []
1809 for i in xrange(gets):
1866 for i in xrange(gets):
1810 getseq.append(random.choice(values))
1867 getseq.append(random.choice(values))
1811
1868
1812 def dogets():
1869 def dogets():
1813 d = util.lrucachedict(size)
1870 d = util.lrucachedict(size)
1814 for v in values:
1871 for v in values:
1815 d[v] = v
1872 d[v] = v
1816 for key in getseq:
1873 for key in getseq:
1817 value = d[key]
1874 value = d[key]
1818 value # silence pyflakes warning
1875 value # silence pyflakes warning
1819
1876
1820 # Set mode tests insertion speed with cache eviction.
1877 # Set mode tests insertion speed with cache eviction.
1821 setseq = []
1878 setseq = []
1822 for i in xrange(sets):
1879 for i in xrange(sets):
1823 setseq.append(random.randint(0, sys.maxint))
1880 setseq.append(random.randint(0, sys.maxint))
1824
1881
1825 def dosets():
1882 def dosets():
1826 d = util.lrucachedict(size)
1883 d = util.lrucachedict(size)
1827 for v in setseq:
1884 for v in setseq:
1828 d[v] = v
1885 d[v] = v
1829
1886
1830 # Mixed mode randomly performs gets and sets with eviction.
1887 # Mixed mode randomly performs gets and sets with eviction.
1831 mixedops = []
1888 mixedops = []
1832 for i in xrange(mixed):
1889 for i in xrange(mixed):
1833 r = random.randint(0, 100)
1890 r = random.randint(0, 100)
1834 if r < mixedgetfreq:
1891 if r < mixedgetfreq:
1835 op = 0
1892 op = 0
1836 else:
1893 else:
1837 op = 1
1894 op = 1
1838
1895
1839 mixedops.append((op, random.randint(0, size * 2)))
1896 mixedops.append((op, random.randint(0, size * 2)))
1840
1897
1841 def domixed():
1898 def domixed():
1842 d = util.lrucachedict(size)
1899 d = util.lrucachedict(size)
1843
1900
1844 for op, v in mixedops:
1901 for op, v in mixedops:
1845 if op == 0:
1902 if op == 0:
1846 try:
1903 try:
1847 d[v]
1904 d[v]
1848 except KeyError:
1905 except KeyError:
1849 pass
1906 pass
1850 else:
1907 else:
1851 d[v] = v
1908 d[v] = v
1852
1909
1853 benches = [
1910 benches = [
1854 (doinit, 'init'),
1911 (doinit, 'init'),
1855 (dogets, 'gets'),
1912 (dogets, 'gets'),
1856 (dosets, 'sets'),
1913 (dosets, 'sets'),
1857 (domixed, 'mixed')
1914 (domixed, 'mixed')
1858 ]
1915 ]
1859
1916
1860 for fn, title in benches:
1917 for fn, title in benches:
1861 timer, fm = gettimer(ui, opts)
1918 timer, fm = gettimer(ui, opts)
1862 timer(fn, title=title)
1919 timer(fn, title=title)
1863 fm.end()
1920 fm.end()
1864
1921
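# Editor's note: an illustrative sketch (not part of this changeset) of the
# lrucachedict behavior benchmarked above -- inserting past `size` evicts
# the least recently used key:
#
#     >>> from mercurial import util
#     >>> d = util.lrucachedict(2)
#     >>> d['a'] = 1
#     >>> d['b'] = 2
#     >>> d['a']              # touch 'a' so 'b' becomes least recently used
#     1
#     >>> d['c'] = 3          # evicts 'b'
#     >>> 'b' in d
#     False
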
@command('perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    timer, fm = gettimer(ui, opts)
    def write():
        for i in range(100000):
            ui.write(('Testing write performance\n'))
    timer(write)
    fm.end()

def uisetup(ui):
    if (util.safehasattr(cmdutil, 'openrevlog') and
        not util.safehasattr(commands, 'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, the '--dir' option for
        # openrevlog() should cause a failure, because it has only been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
                raise error.Abort("This version doesn't support --dir option",
                                  hint="use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)
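
# Editor's note: a minimal sketch (not part of this changeset) of the
# extensions.wrapfunction() pattern used above -- the wrapper receives the
# original function as its first argument and decides whether to delegate:
#
#     def wrapped(orig, *args, **kwargs):
#         # pre-flight checks or argument rewriting go here
#         return orig(*args, **kwargs)
#
#     extensions.wrapfunction(cmdutil, 'openrevlog', wrapped)
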
@@ -1,698 +1,728 @@
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License version 2 or any later version.
10 of the GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phase' is an indicator that tells us how a changeset is
20 A 'changeset phase' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase is described
21 manipulated and communicated. The details of each phase is described
22 below, here we describe the properties they have in common.
22 below, here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not
24 Like bookmarks, phases are not stored in history and thus are not
25 permanent and leave no audit trail.
25 permanent and leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered,
27 First, no changeset can be in two phases at once. Phases are ordered,
28 so they can be considered from lowest to highest. The default, lowest
28 so they can be considered from lowest to highest. The default, lowest
29 phase is 'public' - this is the normal phase of existing changesets. A
29 phase is 'public' - this is the normal phase of existing changesets. A
30 child changeset can not be in a lower phase than its parents.
30 child changeset can not be in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 Local commits are draft by default.
39 Local commits are draft by default.
40
40
41 Phase Movement and Exchange
41 Phase Movement and Exchange
42 ===========================
42 ===========================
43
43
44 Phase data is exchanged by pushkey on pull and push. Some servers have
44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 a publish option set, we call such a server a "publishing server".
45 a publish option set, we call such a server a "publishing server".
46 Pushing a draft changeset to a publishing server changes the phase to
46 Pushing a draft changeset to a publishing server changes the phase to
47 public.
47 public.
48
48
49 A small list of fact/rules define the exchange of phase:
49 A small list of fact/rules define the exchange of phase:
50
50
51 * old client never changes server states
51 * old client never changes server states
52 * pull never changes server states
52 * pull never changes server states
53 * publish and old server changesets are seen as public by client
53 * publish and old server changesets are seen as public by client
54 * any secret changeset seen in another repository is lowered to at
54 * any secret changeset seen in another repository is lowered to at
55 least draft
55 least draft
56
56
57 Here is the final table summing up the 49 possible use cases of phase
57 Here is the final table summing up the 49 possible use cases of phase
58 exchange:
58 exchange:
59
59
60 server
60 server
61 old publish non-publish
61 old publish non-publish
62 N X N D P N D P
62 N X N D P N D P
63 old client
63 old client
64 pull
64 pull
65 N - X/X - X/D X/P - X/D X/P
65 N - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
67 push
67 push
68 X X/X X/X X/P X/P X/P X/D X/D X/P
68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 new client
69 new client
70 pull
70 pull
71 N - P/X - P/D P/P - D/D P/P
71 N - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
73 P - P/X - P/D P/P - P/D P/P
73 P - P/X - P/D P/P - P/D P/P
74 push
74 push
75 D P/X P/X P/P P/P P/P D/D D/D P/P
75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
77
77
78 Legend:
78 Legend:
79
79
80 A/B = final state on client / state on server
80 A/B = final state on client / state on server
81
81
82 * N = new/not present,
82 * N = new/not present,
83 * P = public,
83 * P = public,
84 * D = draft,
84 * D = draft,
85 * X = not tracked (i.e., the old client or server has no internal
85 * X = not tracked (i.e., the old client or server has no internal
86 way of recording the phase.)
86 way of recording the phase.)
87
87
88 passive = only pushes
88 passive = only pushes
89
89
90
90
91 A cell here can be read like this:
91 A cell here can be read like this:
92
92
93 "When a new client pushes a draft changeset (D) to a publishing
93 "When a new client pushes a draft changeset (D) to a publishing
94 server where it's not present (N), it's marked public on both
94 server where it's not present (N), it's marked public on both
95 sides (P/P)."
95 sides (P/P)."
96
96
97 Note: old client behave as a publishing server with draft only content
97 Note: old client behave as a publishing server with draft only content
98 - other people see it as public
98 - other people see it as public
99 - content is pushed as draft
99 - content is pushed as draft
100
100
101 """
101 """

from __future__ import absolute_import

import errno
import struct

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    error,
    pycompat,
    smartset,
    txnutil,
    util,
)

_fphasesentry = struct.Struct('>i20s')

allphases = public, draft, secret = range(3)
trackedphases = allphases[1:]
phasenames = ['public', 'draft', 'secret']
mutablephases = tuple(allphases[1:])
remotehiddenphases = tuple(allphases[2:])

def _readroots(repo, phasedefaults=None):
    """Read phase roots from disk

    phasedefaults is a list of fn(repo, roots) callables, which are
    executed if the phase roots file does not exist. When phases are
    being initialized on an existing repository, this could be used to
    set the phase of selected changesets to something other than public.

    Return (roots, dirty) where dirty is true if roots differ from
    what is being stored.
    """
    repo = repo.unfiltered()
    dirty = False
    roots = [set() for i in allphases]
    try:
        f, pending = txnutil.trypending(repo.root, repo.svfs, 'phaseroots')
        try:
            for line in f:
                phase, nh = line.split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        if phasedefaults:
            for f in phasedefaults:
                roots = f(repo, roots)
            dirty = True
    return roots, dirty
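
# Editor's note: an illustrative sketch (not part of this changeset) of the
# 'phaseroots' file format parsed above -- one "<phase> <hex node>" pair per
# line (the nodes below are hypothetical); typically only draft (1) and
# secret (2) roots are present, since public needs no roots:
#
#     1 6523a6b10e35f7f6e5f32b4e62ed5e7e8c62a8ee
#     2 d1b9f36a1d5f0ea27d668e8769aa3d0e6a42c1b3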

def binaryencode(phasemapping):
    """encode a 'phase -> nodes' mapping into a binary stream

    Since phases are integers, the mapping is actually a python list:
    [[PUBLIC_HEADS], [DRAFT_HEADS], [SECRET_HEADS]]
    """
    binarydata = []
    for phase, nodes in enumerate(phasemapping):
        for head in nodes:
            binarydata.append(_fphasesentry.pack(phase, head))
    return ''.join(binarydata)

def binarydecode(stream):
    """decode a binary stream into a 'phase -> nodes' mapping

    Since phases are integers, the mapping is actually a python list."""
    headsbyphase = [[] for i in allphases]
    entrysize = _fphasesentry.size
    while True:
        entry = stream.read(entrysize)
        if len(entry) < entrysize:
            if entry:
                raise error.Abort(_('bad phase-heads stream'))
            break
        phase, node = _fphasesentry.unpack(entry)
        headsbyphase[phase].append(node)
    return headsbyphase
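
# Editor's note: an illustrative sketch (not part of this changeset) of the
# entry format used above -- '>i20s' packs a big-endian 32-bit phase number
# followed by a 20-byte binary node, i.e. 24 bytes per entry:
#
#     >>> import struct
#     >>> entry = struct.Struct('>i20s')
#     >>> data = entry.pack(1, '\x11' * 20)   # a draft head with a dummy node
#     >>> entry.size
#     24
#     >>> phase, node = entry.unpack(data)
#     >>> phase, len(node)
#     (1, 20)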

def _trackphasechange(data, rev, old, new):
    """add a phase move to the <data> dictionary

    If data is None, nothing happens.
    """
    if data is None:
        return
    existing = data.get(rev)
    if existing is not None:
        old = existing[0]
    data[rev] = (old, new)

class phasecache(object):
    def __init__(self, repo, phasedefaults, _load=True):
        if _load:
            # Cheap trick to allow shallow-copy without copy module
            self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
            self._loadedrevslen = 0
            self._phasesets = None
            self.filterunknown(repo)
            self.opener = repo.svfs

    def getrevset(self, repo, phases, subset=None):
        """return a smartset for the given phases"""
        self.loadphaserevs(repo) # ensure phase's sets are loaded
        phases = set(phases)
        if public not in phases:
            # fast path: _phasesets contains the interesting sets,
            # might only need a union and post-filtering.
            if len(phases) == 1:
                [p] = phases
                revs = self._phasesets[p]
            else:
                revs = set.union(*[self._phasesets[p] for p in phases])
            if repo.changelog.filteredrevs:
                revs = revs - repo.changelog.filteredrevs
            if subset is None:
                return smartset.baseset(revs)
            else:
                return subset & smartset.baseset(revs)
        else:
            phases = set(allphases).difference(phases)
            if not phases:
                return smartset.fullreposet(repo)
            if len(phases) == 1:
                [p] = phases
                revs = self._phasesets[p]
            else:
                revs = set.union(*[self._phasesets[p] for p in phases])
            if subset is None:
                subset = smartset.fullreposet(repo)
            if not revs:
                return subset
            return subset.filter(lambda r: r not in revs)
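
    # Editor's note: an illustrative usage sketch (not part of this
    # changeset) -- this is roughly how revset predicates such as draft()
    # consume getrevset ('repo' is assumed to exist):
    #
    #     revs = repo._phasecache.getrevset(repo, (draft, secret))
    #     # 'revs' is a smartset of every non-public revision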

    def copy(self):
        # Shallow copy meant to ensure isolation in
        # advance/retractboundary(), nothing more.
        ph = self.__class__(None, None, _load=False)
        ph.phaseroots = self.phaseroots[:]
        ph.dirty = self.dirty
        ph.opener = self.opener
        ph._loadedrevslen = self._loadedrevslen
        ph._phasesets = self._phasesets
        return ph

    def replace(self, phcache):
        """replace all values in 'self' with content of phcache"""
        for a in ('phaseroots', 'dirty', 'opener', '_loadedrevslen',
                  '_phasesets'):
            setattr(self, a, getattr(phcache, a))

    def _getphaserevsnative(self, repo):
        repo = repo.unfiltered()
        nativeroots = []
        for phase in trackedphases:
            nativeroots.append(pycompat.maplist(repo.changelog.rev,
                                                self.phaseroots[phase]))
        return repo.changelog.computephases(nativeroots)

    def _computephaserevspure(self, repo):
        repo = repo.unfiltered()
        cl = repo.changelog
        self._phasesets = [set() for phase in allphases]
        roots = pycompat.maplist(cl.rev, self.phaseroots[secret])
        if roots:
            ps = set(cl.descendants(roots))
            for root in roots:
                ps.add(root)
            self._phasesets[secret] = ps
        roots = pycompat.maplist(cl.rev, self.phaseroots[draft])
        if roots:
            ps = set(cl.descendants(roots))
            for root in roots:
                ps.add(root)
            ps.difference_update(self._phasesets[secret])
            self._phasesets[draft] = ps
        self._loadedrevslen = len(cl)

    def loadphaserevs(self, repo):
        """ensure phase information is loaded in the object"""
        if self._phasesets is None:
            try:
                res = self._getphaserevsnative(repo)
                self._loadedrevslen, self._phasesets = res
            except AttributeError:
                self._computephaserevspure(repo)

    def invalidate(self):
        self._loadedrevslen = 0
        self._phasesets = None

    def phase(self, repo, rev):
        # We need a repo argument here to be able to build _phasesets
        # if necessary. The repository instance is not stored in
        # phasecache to avoid reference cycles. The changelog instance
        # is not stored because it is a filecache() property and can
        # be replaced without us being notified.
        if rev == nullrev:
            return public
        if rev < nullrev:
            raise ValueError(_('cannot lookup negative revision'))
        if rev >= self._loadedrevslen:
            self.invalidate()
            self.loadphaserevs(repo)
        for phase in trackedphases:
            if rev in self._phasesets[phase]:
                return phase
        return public

    def write(self):
        if not self.dirty:
            return
        f = self.opener('phaseroots', 'w', atomictemp=True, checkambig=True)
        try:
            self._write(f)
        finally:
            f.close()

    def _write(self, fp):
        for phase, roots in enumerate(self.phaseroots):
            for h in sorted(roots):
                fp.write('%i %s\n' % (phase, hex(h)))
        self.dirty = False

    def _updateroots(self, phase, newroots, tr):
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True

        tr.addfilegenerator('phase', ('phaseroots',), self._write)
        tr.hookargs['phases_moved'] = '1'

    def registernew(self, repo, tr, targetphase, nodes):
        repo = repo.unfiltered()
        self._retractboundary(repo, tr, targetphase, nodes)
        if tr is not None and 'phases' in tr.changes:
            phasetracking = tr.changes['phases']
            torev = repo.changelog.rev
            phase = self.phase
            for n in nodes:
                rev = torev(n)
                revphase = phase(repo, rev)
                _trackphasechange(phasetracking, rev, None, revphase)
        repo.invalidatevolatilesets()

    def advanceboundary(self, repo, tr, targetphase, nodes, dryrun=None):
        """Set all 'nodes' to phase 'targetphase'

        Nodes with a phase lower than 'targetphase' are not affected.

        If dryrun is True, no actions will be performed

        Returns a set of revs whose phase is changed or should be changed
        """
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.
        if tr is None:
            phasetracking = None
        else:
            phasetracking = tr.changes.get('phases')

        repo = repo.unfiltered()

        changes = set() # set of revisions to be changed
        delroots = [] # set of roots deleted by this pass
        for phase in pycompat.xrange(targetphase + 1, len(allphases)):
            # filter nodes that are not in a compatible phase already
            nodes = [n for n in nodes
                     if self.phase(repo, repo[n].rev()) >= phase]
            if not nodes:
                break # no roots to move anymore

            olds = self.phaseroots[phase]

            affected = repo.revs('%ln::%ln', olds, nodes)
            changes.update(affected)
            if dryrun:
                continue
            for r in affected:
                _trackphasechange(phasetracking, r, self.phase(repo, r),
                                  targetphase)

            roots = set(ctx.node() for ctx in repo.set(
                'roots((%ln::) - %ld)', olds, affected))
            if olds != roots:
                self._updateroots(phase, roots, tr)
                # some roots may need to be declared for lower phases
                delroots.extend(olds - roots)
        if not dryrun:
            # declare deleted roots in the target phase
            if targetphase != 0:
                self._retractboundary(repo, tr, targetphase, delroots)
        repo.invalidatevolatilesets()
        return changes

    def retractboundary(self, repo, tr, targetphase, nodes):
        oldroots = self.phaseroots[:targetphase + 1]
        if tr is None:
            phasetracking = None
        else:
            phasetracking = tr.changes.get('phases')
        repo = repo.unfiltered()
        if (self._retractboundary(repo, tr, targetphase, nodes)
            and phasetracking is not None):

            # find the affected revisions
            new = self.phaseroots[targetphase]
            old = oldroots[targetphase]
            affected = set(repo.revs('(%ln::) - (%ln::)', new, old))

            # find the phase of the affected revisions
            for phase in pycompat.xrange(targetphase, -1, -1):
                if phase:
                    roots = oldroots[phase]
                    revs = set(repo.revs('%ln::%ld', roots, affected))
                    affected -= revs
                else: # public phase
                    revs = affected
                for r in revs:
                    _trackphasechange(phasetracking, r, phase, targetphase)
        repo.invalidatevolatilesets()

    def _retractboundary(self, repo, tr, targetphase, nodes):
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.

        repo = repo.unfiltered()
        currentroots = self.phaseroots[targetphase]
        finalroots = oldroots = set(currentroots)
        newroots = [n for n in nodes
                    if self.phase(repo, repo[n].rev()) < targetphase]
        if newroots:

            if nullid in newroots:
                raise error.Abort(_('cannot change null revision phase'))
            currentroots = currentroots.copy()
            currentroots.update(newroots)

            # Only compute new roots for revs above the roots that are being
            # retracted.
            minnewroot = min(repo[n].rev() for n in newroots)
            aboveroots = [n for n in currentroots
                          if repo[n].rev() >= minnewroot]
            updatedroots = repo.set('roots(%ln::)', aboveroots)

            finalroots = set(n for n in currentroots if repo[n].rev() <
                             minnewroot)
            finalroots.update(ctx.node() for ctx in updatedroots)
        if finalroots != oldroots:
            self._updateroots(targetphase, finalroots, tr)
            return True
        return False

    def filterunknown(self, repo):
        """remove unknown nodes from the phase boundary

        Nothing is lost as unknown nodes only hold data for their descendants.
        """
        filtered = False
        nodemap = repo.changelog.nodemap # to filter unknown nodes
        for phase, nodes in enumerate(self.phaseroots):
            missing = sorted(node for node in nodes if node not in nodemap)
            if missing:
                for mnode in missing:
                    repo.ui.debug(
                        'removing unknown node %s from %i-phase boundary\n'
                        % (short(mnode), phase))
                nodes.symmetric_difference_update(missing)
                filtered = True
        if filtered:
            self.dirty = True
        # filterunknown is called by repo.destroyed; we may have no changes
        # in the roots, but the _phasesets contents are certainly invalid
        # (or at least we have no proper way to check that). Related to
        # issue 3858.
        #
        # The other caller is __init__, which has no _phasesets initialized
        # anyway. If this changes, we should consider adding a dedicated
        # "destroyed" function to phasecache or a proper cache key mechanism
        # (see the branchmap one)
        self.invalidate()

def advanceboundary(repo, tr, targetphase, nodes, dryrun=None):
    """Add nodes to a phase, changing other nodes' phases if necessary.

    This function moves the boundary *forward*: all nodes are set to
    the target phase or kept in a *lower* phase.

    The boundary is simplified to contain phase roots only.

    If dryrun is True, no actions will be performed

    Returns a set of revs whose phase is changed or should be changed
    """
    phcache = repo._phasecache.copy()
    changes = phcache.advanceboundary(repo, tr, targetphase, nodes,
                                      dryrun=dryrun)
    if not dryrun:
        repo._phasecache.replace(phcache)
    return changes

def retractboundary(repo, tr, targetphase, nodes):
    """Set nodes back to a phase, changing other nodes' phases if
    necessary.

    This function moves the boundary *backward*: all nodes are set to
    the target phase or kept in a *higher* phase.

    The boundary is simplified to contain phase roots only."""
    phcache = repo._phasecache.copy()
    phcache.retractboundary(repo, tr, targetphase, nodes)
    repo._phasecache.replace(phcache)

def registernew(repo, tr, targetphase, nodes):
    """register a new revision and its phase

    Code adding revisions to the repository should use this function to
    set new changesets in their target phase (or higher).
    """
    phcache = repo._phasecache.copy()
    phcache.registernew(repo, tr, targetphase, nodes)
    repo._phasecache.replace(phcache)

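# Editor's note: a minimal usage sketch (not part of this changeset) of the
# copy-mutate-replace pattern above, moving nodes back to the draft phase
# inside a transaction ('repo' and 'nodes' are assumed to exist):
#
#     with repo.lock(), repo.transaction('phase') as tr:
#         retractboundary(repo, tr, draft, nodes)
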
def listphases(repo):
    """List phase roots for serialization over pushkey"""
    # Use ordered dictionary so behavior is deterministic.
    keys = util.sortdict()
    value = '%i' % draft
    cl = repo.unfiltered().changelog
    for root in repo._phasecache.phaseroots[draft]:
        if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
            keys[hex(root)] = value

    if repo.publishing():
        # Add an extra entry to let the remote know we are a publishing
        # repo. Publishing repos can't just pretend they are old repos.
        # When pushing to a publishing repo, the client still needs to
        # push the phase boundary.
        #
        # Push does not only push changesets; it also pushes phase data.
        # New phase data may apply to common changesets which won't be
        # pushed (as they are common). Here is a very simple example:
        #
        # 1) repo A pushes changeset X as draft to repo B
        # 2) repo B makes changeset X public
        # 3) repo B pushes to repo A. X is not pushed, but the data that
        #    X is now public should be.
        #
        # The server can't handle it on its own as it has no idea of
        # the client's phase data.
        keys['publishing'] = 'True'
    return keys

def pushphase(repo, nhex, oldphasestr, newphasestr):
    """advance the phase of a node received over pushkey

    Returns True if the phase was updated (or already matched)."""
    repo = repo.unfiltered()
    with repo.lock():
        currentphase = repo[nhex].phase()
        newphase = abs(int(newphasestr)) # let's avoid negative index surprise
        oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
        if currentphase == oldphase and newphase < oldphase:
            with repo.transaction('pushkey-phase') as tr:
                advanceboundary(repo, tr, newphase, [bin(nhex)])
            return True
        elif currentphase == newphase:
            # raced, but got correct result
            return True
        else:
            return False

def subsetphaseheads(repo, subset):
    """Finds the phase heads for a subset of a history

    Returns a list indexed by phase number where each item is a list of phase
    head nodes.
    """
    cl = repo.changelog

    headsbyphase = [[] for i in allphases]
    # No need to keep track of secret phase; any heads in the subset that
    # are not mentioned are implicitly secret.
    for phase in allphases[:-1]:
        revset = "heads(%%ln & %s())" % phasenames[phase]
        headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
    return headsbyphase

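# Editor's note: an illustrative sketch (not part of this changeset) of the
# revset templating above -- '%%ln' survives the string formatting as '%ln',
# which repo.revs() then substitutes with a list of nodes:
#
#     >>> "heads(%%ln & %s())" % 'draft'
#     'heads(%ln & draft())'
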
def updatephases(repo, trgetter, headsbyphase):
    """Updates the repo with the given phase heads"""
    # Now advance phase boundaries of all but secret phase
    #
    # run the update (and fetch the transaction) only if there are actually
    # things to update. This avoids creating empty transactions during no-op
    # operations.

    for phase in allphases[:-1]:
        revset = '%%ln - %s()' % phasenames[phase]
        heads = [c.node() for c in repo.set(revset, headsbyphase[phase])]
        if heads:
            advanceboundary(repo, trgetter(), phase, heads)

def analyzeremotephases(repo, subset, roots):
    """Compute phase heads and roots in a subset of nodes from a root dict

    * subset is heads of the subset
    * roots is a {<nodeid> => phase} mapping. keys and values are strings.

    Accepts unknown elements in the input.
    """
    repo = repo.unfiltered()
    # build list from dictionary
    draftroots = []
    nodemap = repo.changelog.nodemap # to filter unknown nodes
    for nhex, phase in roots.iteritems():
        if nhex == 'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
        if phase == public:
            if node != nullid:
                repo.ui.warn(_('ignoring inconsistent public root'
                               ' from remote: %s\n') % nhex)
        elif phase == draft:
            if node in nodemap:
                draftroots.append(node)
        else:
            repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
                         % (phase, nhex))
    # compute heads
    publicheads = newheads(repo, subset, draftroots)
    return publicheads, draftroots

class remotephasessummary(object):
    """summarize phase information on the remote side

    :publishing: True if the remote is publishing
    :publicheads: list of remote public phase heads (nodes)
    :draftheads: list of remote draft phase heads (nodes)
    :draftroots: list of remote draft phase roots (nodes)
    """

    def __init__(self, repo, remotesubset, remoteroots):
        unfi = repo.unfiltered()
        self._allremoteroots = remoteroots

        self.publishing = remoteroots.get('publishing', False)

        ana = analyzeremotephases(repo, remotesubset, remoteroots)
        self.publicheads, self.draftroots = ana
        # Get the list of all "heads" revs draft on remote
        dheads = unfi.set('heads(%ln::%ln)', self.draftroots, remotesubset)
        self.draftheads = [c.node() for c in dheads]

def newheads(repo, heads, roots):
    """compute the new heads of a subset minus another

    * `heads`: defines the first subset
    * `roots`: defines the second, which we subtract from the first"""
+    # prevent an import cycle
+    # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases
+    from . import dagop
+
    repo = repo.unfiltered()
-    revs = repo.revs('heads(::%ln - (%ln::%ln))', heads, roots, heads)
-    return pycompat.maplist(repo.changelog.node, revs)
+    cl = repo.changelog
+    rev = cl.nodemap.get
+    if not roots:
+        return heads
+    if not heads or heads == [nullrev]:
+        return []
+    # The logic operates on revisions; convert the arguments early for
+    # convenience
+    new_heads = set(rev(n) for n in heads if n != nullid)
+    roots = [rev(n) for n in roots]
+    if not heads or not roots:
+        return heads
+    # compute the area we need to remove
+    affected_zone = repo.revs("(%ld::%ld)", roots, new_heads)
+    # heads in the area are no longer heads
+    new_heads.difference_update(affected_zone)
+    # revisions in the area have children outside of it;
+    # they might be new heads
+    candidates = repo.revs("parents(%ld + (%ld and merge())) and not null",
+                           roots, affected_zone)
+    candidates -= affected_zone
+    if new_heads or candidates:
+        # remove candidates that are ancestors of other heads
+        new_heads.update(candidates)
+        prunestart = repo.revs("parents(%ld) and not null", new_heads)
+        pruned = dagop.reachableroots(repo, candidates, prunestart)
+        new_heads.difference_update(pruned)
+
+    return pycompat.maplist(cl.node, sorted(new_heads))

def newcommitphase(ui):
    """helper to get the target phase of new commits

    Handles all possible values for the phases.new-commit option.
    """
    v = ui.config('phases', 'new-commit')
    try:
        return phasenames.index(v)
    except ValueError:
        try:
            return int(v)
        except ValueError:
            msg = _("phases.new-commit: not a valid phase name ('%s')")
            raise error.ConfigError(msg % v)

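# Editor's note: an illustrative sketch (not part of this changeset) of the
# configuration consumed above -- phases.new-commit accepts a phase name or
# an integer index:
#
#     [phases]
#     # 'secret', 'draft' (the default), or an index such as 2
#     new-commit = secret
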
def hassecret(repo):
    """utility function that checks whether a repo has any secret changesets."""
    return bool(repo._phasecache.phaseroots[2])

def preparehookargs(node, old, new):
    if old is None:
        old = ''
    else:
        old = phasenames[old]
    return {'node': node,
            'oldphase': old,
            'phase': phasenames[new]}

@@ -1,3008 +1,3020 @@
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import hashlib
import heapq
import os
import re
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    wdirfilenodeids,
    wdirhex,
    wdirid,
    wdirrev,
)
from .i18n import _
from .thirdparty import (
    attr,
)
from . import (
    ancestor,
    error,
    mdiff,
    policy,
    pycompat,
    templatefilters,
    util,
)
from .utils import (
    stringutil,
)

parsers = policy.importmod(r'parsers')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# revlog header flags
REVLOGV0 = 0
REVLOGV1 = 1
# Dummy value until file format is finalized.
# Reminder: change the bounds check in revlog.__init__ when this is changed.
REVLOGV2 = 0xDEAD
FLAG_INLINE_DATA = (1 << 16)
FLAG_GENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
REVLOG_DEFAULT_FORMAT = REVLOGV1
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
REVLOGV2_FLAGS = REVLOGV1_FLAGS

# revlog index flags
REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
REVIDX_DEFAULT_FLAGS = 0
# stable order in which flags need to be processed and their processors applied
REVIDX_FLAGS_ORDER = [
    REVIDX_ISCENSORED,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
]
REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
# bitmask for flags that could cause rawdata content change
REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED

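# Editor's note: an illustrative check (not part of this changeset) of what
# util.bitsfrom computes above -- the bitwise OR of the listed flags:
#
#     >>> (1 << 15) | (1 << 14) | (1 << 13) == 0xe000
#     True
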
# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

RevlogError = error.RevlogError
LookupError = error.LookupError
AmbiguousPrefixLookupError = error.AmbiguousPrefixLookupError
CensoredNodeError = error.CensoredNodeError
ProgrammingError = error.ProgrammingError

# Store flag processors (cf. 'addflagprocessor()' to register)
_flagprocessors = {
    REVIDX_ISCENSORED: None,
}

_mdre = re.compile('\1\n')
def parsemeta(text):
    """return (metadatadict, metadatasize)"""
    # text can be buffer, so we can't use .startswith or .index
    if text[:2] != '\1\n':
        return None, None
    s = _mdre.search(text, 2).start()
    mtext = text[2:s]
    meta = {}
    for l in mtext.splitlines():
        k, v = l.split(": ", 1)
        meta[k] = v
    return meta, (s + 2)

def packmeta(meta, text):
    keys = sorted(meta)
    metatext = "".join("%s: %s\n" % (k, meta[k]) for k in keys)
    return "\1\n%s\1\n%s" % (metatext, text)

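# Editor's note: an illustrative sketch (not part of this changeset) of the
# filelog metadata framing handled above -- metadata sits between two
# "\x01\n" markers in front of the text:
#
#     >>> raw = packmeta({'copy': 'a'}, 'content')
#     >>> raw
#     '\x01\ncopy: a\n\x01\ncontent'
#     >>> meta, size = parsemeta(raw)
#     >>> meta['copy'], raw[size:]
#     ('a', 'content')
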
122 def _censoredtext(text):
122 def _censoredtext(text):
123 m, offs = parsemeta(text)
123 m, offs = parsemeta(text)
124 return m and "censored" in m
124 return m and "censored" in m
125
125
126 def addflagprocessor(flag, processor):
126 def addflagprocessor(flag, processor):
127 """Register a flag processor on a revision data flag.
127 """Register a flag processor on a revision data flag.
128
128
129 Invariant:
129 Invariant:
130 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
130 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
131 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
131 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
132 - Only one flag processor can be registered on a specific flag.
132 - Only one flag processor can be registered on a specific flag.
133 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
133 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
134 following signatures:
134 following signatures:
135 - (read) f(self, rawtext) -> text, bool
135 - (read) f(self, rawtext) -> text, bool
136 - (write) f(self, text) -> rawtext, bool
136 - (write) f(self, text) -> rawtext, bool
137 - (raw) f(self, rawtext) -> bool
137 - (raw) f(self, rawtext) -> bool
138 "text" is presented to the user. "rawtext" is stored in revlog data, not
138 "text" is presented to the user. "rawtext" is stored in revlog data, not
139 directly visible to the user.
139 directly visible to the user.
140 The boolean returned by these transforms is used to determine whether
140 The boolean returned by these transforms is used to determine whether
141 the returned text can be used for hash integrity checking. For example,
141 the returned text can be used for hash integrity checking. For example,
142 if "write" returns False, then "text" is used to generate hash. If
142 if "write" returns False, then "text" is used to generate hash. If
143 "write" returns True, that basically means "rawtext" returned by "write"
143 "write" returns True, that basically means "rawtext" returned by "write"
144 should be used to generate hash. Usually, "write" and "read" return
144 should be used to generate hash. Usually, "write" and "read" return
145 different booleans. And "raw" returns a same boolean as "write".
145 different booleans. And "raw" returns a same boolean as "write".
146
146
147 Note: The 'raw' transform is used for changegroup generation and in some
147 Note: The 'raw' transform is used for changegroup generation and in some
148 debug commands. In this case the transform only indicates whether the
148 debug commands. In this case the transform only indicates whether the
149 contents can be used for hash integrity checks.
149 contents can be used for hash integrity checks.
150 """
150 """
151 if not flag & REVIDX_KNOWN_FLAGS:
151 if not flag & REVIDX_KNOWN_FLAGS:
152 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
152 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
153 raise ProgrammingError(msg)
153 raise ProgrammingError(msg)
154 if flag not in REVIDX_FLAGS_ORDER:
154 if flag not in REVIDX_FLAGS_ORDER:
155 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
155 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
156 raise ProgrammingError(msg)
156 raise ProgrammingError(msg)
157 if flag in _flagprocessors:
157 if flag in _flagprocessors:
158 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
158 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
159 raise error.Abort(msg)
159 raise error.Abort(msg)
160 _flagprocessors[flag] = processor
160 _flagprocessors[flag] = processor
161
161
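# A minimal sketch of registering a flag processor under the contract
# documented above; resolveexternal()/storeexternal() are hypothetical
# stand-ins for an extension's externalized-storage helpers. The booleans
# follow the docstring: here the hash is computed over the user-visible
# text, so "read" returns True while "write" and "raw" return False.
def _extstoredread(rl, rawtext):
    # stored pointer -> user-visible text; True: result matches the hash
    return resolveexternal(rawtext), True

def _extstoredwrite(rl, text):
    # user-visible text -> stored pointer; False: hash "text", not "rawtext"
    return storeexternal(text), False

def _extstoredraw(rl, rawtext):
    # same boolean as the "write" transform, per the docstring above
    return False

# a caller would then register the 3-tuple once:
# addflagprocessor(REVIDX_EXTSTORED,
#                  (_extstoredread, _extstoredwrite, _extstoredraw))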
162 def getoffset(q):
162 def getoffset(q):
163 return int(q >> 16)
163 return int(q >> 16)
164
164
165 def gettype(q):
165 def gettype(q):
166 return int(q & 0xFFFF)
166 return int(q & 0xFFFF)
167
167
168 def offset_type(offset, type):
168 def offset_type(offset, type):
169 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
169 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
170 raise ValueError('unknown revlog index flags')
170 raise ValueError('unknown revlog index flags')
171 return int(int(offset) << 16 | type)
171 return int(int(offset) << 16 | type)
172
172
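# A quick sketch of the packing above: a 48-bit data-file offset and the
# 16-bit flag field share one integer, split back out by getoffset() and
# gettype().
def _demo_offset_type():
    packed = offset_type(1024, REVIDX_ISCENSORED)
    assert getoffset(packed) == 1024
    assert gettype(packed) == REVIDX_ISCENSORED
    try:
        offset_type(1024, 1 << 12)  # not in REVIDX_KNOWN_FLAGS
    except ValueError:
        pass  # unknown flag bits are rejected up front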
173 _nullhash = hashlib.sha1(nullid)
173 _nullhash = hashlib.sha1(nullid)
174
174
175 def hash(text, p1, p2):
175 def hash(text, p1, p2):
176 """generate a hash from the given text and its parent hashes
176 """generate a hash from the given text and its parent hashes
177
177
178 This hash combines both the current file contents and its history
178 This hash combines both the current file contents and its history
179 in a manner that makes it easy to distinguish nodes with the same
179 in a manner that makes it easy to distinguish nodes with the same
180 content in the revision graph.
180 content in the revision graph.
181 """
181 """
182 # As of now, if one of the parent nodes is null, p2 is null
182 # As of now, if one of the parent nodes is null, p2 is null
183 if p2 == nullid:
183 if p2 == nullid:
184 # deep copy of a hash is faster than creating one
184 # deep copy of a hash is faster than creating one
185 s = _nullhash.copy()
185 s = _nullhash.copy()
186 s.update(p1)
186 s.update(p1)
187 else:
187 else:
188 # none of the parent nodes are nullid
188 # none of the parent nodes are nullid
189 if p1 < p2:
189 if p1 < p2:
190 a = p1
190 a = p1
191 b = p2
191 b = p2
192 else:
192 else:
193 a = p2
193 a = p2
194 b = p1
194 b = p1
195 s = hashlib.sha1(a)
195 s = hashlib.sha1(a)
196 s.update(b)
196 s.update(b)
197 s.update(text)
197 s.update(text)
198 return s.digest()
198 return s.digest()
199
199
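# A minimal sketch of what hash() computes: SHA-1 over both parent nodes in
# sorted order followed by the text, so the nodeid is symmetric in p1/p2 but
# still depends on history as well as content.
def _demo_hash():
    p1, p2, text = b'\x11' * 20, b'\x22' * 20, b'file content'
    expected = hashlib.sha1(min(p1, p2) + max(p1, p2) + text).digest()
    assert hash(text, p1, p2) == expected == hash(text, p2, p1)
    # with a single parent, the null node fills the p2 slot
    assert hash(text, p1, nullid) == hashlib.sha1(nullid + p1 + text).digest()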
200 class _testrevlog(object):
200 class _testrevlog(object):
201 """minimalist fake revlog to use in doctests"""
201 """minimalist fake revlog to use in doctests"""
202
202
203 def __init__(self, data, density=0.5, mingap=0):
203 def __init__(self, data, density=0.5, mingap=0):
204 """data is an list of revision payload boundaries"""
204 """data is an list of revision payload boundaries"""
205 self._data = data
205 self._data = data
206 self._srdensitythreshold = density
206 self._srdensitythreshold = density
207 self._srmingapsize = mingap
207 self._srmingapsize = mingap
208
208
209 def start(self, rev):
209 def start(self, rev):
210 if rev == 0:
210 if rev == 0:
211 return 0
211 return 0
212 return self._data[rev - 1]
212 return self._data[rev - 1]
213
213
214 def end(self, rev):
214 def end(self, rev):
215 return self._data[rev]
215 return self._data[rev]
216
216
217 def length(self, rev):
217 def length(self, rev):
218 return self.end(rev) - self.start(rev)
218 return self.end(rev) - self.start(rev)
219
219
220 def __len__(self):
220 def __len__(self):
221 return len(self._data)
221 return len(self._data)
222
222
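# A small sketch of the convention used by _testrevlog: _data holds
# cumulative end offsets, so start/end/length fall out by subtraction.
def _demo_testrevlog():
    rl = _testrevlog([5, 10, 12, 12])  # rev 3 is empty
    assert (rl.start(0), rl.end(0), rl.length(0)) == (0, 5, 5)
    assert (rl.start(2), rl.end(2), rl.length(2)) == (10, 12, 2)
    assert rl.length(3) == 0 and len(rl) == 4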
223 def _trimchunk(revlog, revs, startidx, endidx=None):
223 def _trimchunk(revlog, revs, startidx, endidx=None):
224 """returns revs[startidx:endidx] without empty trailing revs
224 """returns revs[startidx:endidx] without empty trailing revs
225
225
226 Doctest Setup
226 Doctest Setup
227 >>> revlog = _testrevlog([
227 >>> revlog = _testrevlog([
228 ... 5, #0
228 ... 5, #0
229 ... 10, #1
229 ... 10, #1
230 ... 12, #2
230 ... 12, #2
231 ... 12, #3 (empty)
231 ... 12, #3 (empty)
232 ... 17, #4
232 ... 17, #4
233 ... 21, #5
233 ... 21, #5
234 ... 21, #6 (empty)
234 ... 21, #6 (empty)
235 ... ])
235 ... ])
236
236
237 Contiguous cases:
237 Contiguous cases:
238 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
238 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
239 [0, 1, 2, 3, 4, 5]
239 [0, 1, 2, 3, 4, 5]
240 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
240 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
241 [0, 1, 2, 3, 4]
241 [0, 1, 2, 3, 4]
242 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
242 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
243 [0, 1, 2]
243 [0, 1, 2]
244 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
244 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
245 [2]
245 [2]
246 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
246 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
247 [3, 4, 5]
247 [3, 4, 5]
248 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
248 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
249 [3, 4]
249 [3, 4]
250
250
251 Discontiguous cases:
251 Discontiguous cases:
252 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
252 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
253 [1, 3, 5]
253 [1, 3, 5]
254 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
254 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
255 [1]
255 [1]
256 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
256 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
257 [3, 5]
257 [3, 5]
258 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
258 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
259 [3, 5]
259 [3, 5]
260 """
260 """
261 length = revlog.length
261 length = revlog.length
262
262
263 if endidx is None:
263 if endidx is None:
264 endidx = len(revs)
264 endidx = len(revs)
265
265
266 # Trim empty revs at the end, but never the very first revision of a chain
266 # If we have a non-empty delta candidate, there is nothing to trim
267 while endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0:
267 if revs[endidx - 1] < len(revlog):
268 endidx -= 1
268 # Trim empty revs at the end, except the very first revision of a chain
269 while (endidx > 1
270 and endidx > startidx
271 and length(revs[endidx - 1]) == 0):
272 endidx -= 1
269
273
270 return revs[startidx:endidx]
274 return revs[startidx:endidx]
271
275
272 def _segmentspan(revlog, revs):
276 def _segmentspan(revlog, revs, deltainfo=None):
273 """Get the byte span of a segment of revisions
277 """Get the byte span of a segment of revisions
274
278
275 revs is a sorted array of revision numbers
279 revs is a sorted array of revision numbers
276
280
277 >>> revlog = _testrevlog([
281 >>> revlog = _testrevlog([
278 ... 5, #0
282 ... 5, #0
279 ... 10, #1
283 ... 10, #1
280 ... 12, #2
284 ... 12, #2
281 ... 12, #3 (empty)
285 ... 12, #3 (empty)
282 ... 17, #4
286 ... 17, #4
283 ... ])
287 ... ])
284
288
285 >>> _segmentspan(revlog, [0, 1, 2, 3, 4])
289 >>> _segmentspan(revlog, [0, 1, 2, 3, 4])
286 17
290 17
287 >>> _segmentspan(revlog, [0, 4])
291 >>> _segmentspan(revlog, [0, 4])
288 17
292 17
289 >>> _segmentspan(revlog, [3, 4])
293 >>> _segmentspan(revlog, [3, 4])
290 5
294 5
291 >>> _segmentspan(revlog, [1, 2, 3,])
295 >>> _segmentspan(revlog, [1, 2, 3,])
292 7
296 7
293 >>> _segmentspan(revlog, [1, 3])
297 >>> _segmentspan(revlog, [1, 3])
294 7
298 7
295 """
299 """
296 if not revs:
300 if not revs:
297 return 0
301 return 0
298 return revlog.end(revs[-1]) - revlog.start(revs[0])
302 if deltainfo is not None and len(revlog) <= revs[-1]:
303 if len(revs) == 1:
304 return deltainfo.deltalen
305 offset = revlog.end(len(revlog) - 1)
306 end = deltainfo.deltalen + offset
307 else:
308 end = revlog.end(revs[-1])
309 return end - revlog.start(revs[0])
299
310
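# A sketch of the new deltainfo branch above: when revs[-1] is the revision
# being added (revs[-1] == len(revlog)), its length is not in the index yet,
# so the span extends deltainfo.deltalen bytes past the current end of the
# data. The namedtuple is a minimal stand-in for the _deltainfo class
# defined further down.
def _demo_segmentspan_pending():
    import collections
    fakedelta = collections.namedtuple('fakedelta', 'deltalen')
    rl = _testrevlog([5, 10, 12])    # three stored revs, data ends at 12
    pending = fakedelta(deltalen=8)  # candidate delta for would-be rev 3
    assert _segmentspan(rl, [1, 3], pending) == (12 + 8) - 5
    assert _segmentspan(rl, [3], pending) == 8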
300 def _slicechunk(revlog, revs, deltainfo=None, targetsize=None):
311 def _slicechunk(revlog, revs, deltainfo=None, targetsize=None):
301 """slice revs to reduce the amount of unrelated data to be read from disk.
312 """slice revs to reduce the amount of unrelated data to be read from disk.
302
313
303 ``revs`` is sliced into groups that should be read in one time.
314 ``revs`` is sliced into groups that should be read in one time.
304 Assume that revs are sorted.
315 Assume that revs are sorted.
305
316
306 The initial chunk is sliced until the overall density (payload/chunks-span
317 The initial chunk is sliced until the overall density (payload/chunks-span
307 ratio) is above `revlog._srdensitythreshold`. No gap smaller than
318 ratio) is above `revlog._srdensitythreshold`. No gap smaller than
308 `revlog._srmingapsize` is skipped.
319 `revlog._srmingapsize` is skipped.
309
320
310 If `targetsize` is set, no chunk larger than `targetsize` will be yielded.
321 If `targetsize` is set, no chunk larger than `targetsize` will be yielded.
311 For consistency with other slicing choices, this limit won't go lower than
322 For consistency with other slicing choices, this limit won't go lower than
312 `revlog._srmingapsize`.
323 `revlog._srmingapsize`.
313
324
314 If individual revision chunks are larger than this limit, they will still
325 If individual revision chunks are larger than this limit, they will still
315 be yielded individually.
326 be yielded individually.
316
327
317 >>> revlog = _testrevlog([
328 >>> revlog = _testrevlog([
318 ... 5, #00 (5)
329 ... 5, #00 (5)
319 ... 10, #01 (5)
330 ... 10, #01 (5)
320 ... 12, #02 (2)
331 ... 12, #02 (2)
321 ... 12, #03 (empty)
332 ... 12, #03 (empty)
322 ... 27, #04 (15)
333 ... 27, #04 (15)
323 ... 31, #05 (4)
334 ... 31, #05 (4)
324 ... 31, #06 (empty)
335 ... 31, #06 (empty)
325 ... 42, #07 (11)
336 ... 42, #07 (11)
326 ... 47, #08 (5)
337 ... 47, #08 (5)
327 ... 47, #09 (empty)
338 ... 47, #09 (empty)
328 ... 48, #10 (1)
339 ... 48, #10 (1)
329 ... 51, #11 (3)
340 ... 51, #11 (3)
330 ... 74, #12 (23)
341 ... 74, #12 (23)
331 ... 85, #13 (11)
342 ... 85, #13 (11)
332 ... 86, #14 (1)
343 ... 86, #14 (1)
333 ... 91, #15 (5)
344 ... 91, #15 (5)
334 ... ])
345 ... ])
335
346
336 >>> list(_slicechunk(revlog, list(range(16))))
347 >>> list(_slicechunk(revlog, list(range(16))))
337 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
348 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
338 >>> list(_slicechunk(revlog, [0, 15]))
349 >>> list(_slicechunk(revlog, [0, 15]))
339 [[0], [15]]
350 [[0], [15]]
340 >>> list(_slicechunk(revlog, [0, 11, 15]))
351 >>> list(_slicechunk(revlog, [0, 11, 15]))
341 [[0], [11], [15]]
352 [[0], [11], [15]]
342 >>> list(_slicechunk(revlog, [0, 11, 13, 15]))
353 >>> list(_slicechunk(revlog, [0, 11, 13, 15]))
343 [[0], [11, 13, 15]]
354 [[0], [11, 13, 15]]
344 >>> list(_slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
355 >>> list(_slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
345 [[1, 2], [5, 8, 10, 11], [14]]
356 [[1, 2], [5, 8, 10, 11], [14]]
346
357
347 Slicing with a maximum chunk size
358 Slicing with a maximum chunk size
348 >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
359 >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
349 [[0], [11], [13], [15]]
360 [[0], [11], [13], [15]]
350 >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
361 >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
351 [[0], [11], [13, 15]]
362 [[0], [11], [13, 15]]
352 """
363 """
353 if targetsize is not None:
364 if targetsize is not None:
354 targetsize = max(targetsize, revlog._srmingapsize)
365 targetsize = max(targetsize, revlog._srmingapsize)
355 # targetsize should not be specified when evaluating delta candidates:
366 # targetsize should not be specified when evaluating delta candidates:
356 # * targetsize is used to ensure we stay within specification when reading,
367 # * targetsize is used to ensure we stay within specification when reading,
357 # * deltainfo is used to pick a good delta chain when writing.
368 # * deltainfo is used to pick a good delta chain when writing.
358 if not (deltainfo is None or targetsize is None):
369 if not (deltainfo is None or targetsize is None):
359 msg = 'cannot use `targetsize` with a `deltainfo`'
370 msg = 'cannot use `targetsize` with a `deltainfo`'
360 raise error.ProgrammingError(msg)
371 raise error.ProgrammingError(msg)
361 for chunk in _slicechunktodensity(revlog, revs,
372 for chunk in _slicechunktodensity(revlog, revs,
362 deltainfo,
373 deltainfo,
363 revlog._srdensitythreshold,
374 revlog._srdensitythreshold,
364 revlog._srmingapsize):
375 revlog._srmingapsize):
365 for subchunk in _slicechunktosize(revlog, chunk, targetsize):
376 for subchunk in _slicechunktosize(revlog, chunk, targetsize):
366 yield subchunk
377 yield subchunk
367
378
368 def _slicechunktosize(revlog, revs, targetsize=None):
379 def _slicechunktosize(revlog, revs, targetsize=None):
369 """slice revs to match the target size
380 """slice revs to match the target size
370
381
371 This is intended to be used on chunks that density slicing selected but that
382 This is intended to be used on chunks that density slicing selected but that
372 are still too large compared to the read guarantee of revlog. This might
383 are still too large compared to the read guarantee of revlog. This might
373 happen when the "minimal gap size" interrupted the slicing or when chains
384 happen when the "minimal gap size" interrupted the slicing or when chains
374 are built in a way that creates large blocks next to each other.
385 are built in a way that creates large blocks next to each other.
375
386
376 >>> revlog = _testrevlog([
387 >>> revlog = _testrevlog([
377 ... 3, #0 (3)
388 ... 3, #0 (3)
378 ... 5, #1 (2)
389 ... 5, #1 (2)
379 ... 6, #2 (1)
390 ... 6, #2 (1)
380 ... 8, #3 (2)
391 ... 8, #3 (2)
381 ... 8, #4 (empty)
392 ... 8, #4 (empty)
382 ... 11, #5 (3)
393 ... 11, #5 (3)
383 ... 12, #6 (1)
394 ... 12, #6 (1)
384 ... 13, #7 (1)
395 ... 13, #7 (1)
385 ... 14, #8 (1)
396 ... 14, #8 (1)
386 ... ])
397 ... ])
387
398
388 Cases where chunk is already small enough
399 Cases where chunk is already small enough
389 >>> list(_slicechunktosize(revlog, [0], 3))
400 >>> list(_slicechunktosize(revlog, [0], 3))
390 [[0]]
401 [[0]]
391 >>> list(_slicechunktosize(revlog, [6, 7], 3))
402 >>> list(_slicechunktosize(revlog, [6, 7], 3))
392 [[6, 7]]
403 [[6, 7]]
393 >>> list(_slicechunktosize(revlog, [0], None))
404 >>> list(_slicechunktosize(revlog, [0], None))
394 [[0]]
405 [[0]]
395 >>> list(_slicechunktosize(revlog, [6, 7], None))
406 >>> list(_slicechunktosize(revlog, [6, 7], None))
396 [[6, 7]]
407 [[6, 7]]
397
408
398 Cases where we need actual slicing
409 Cases where we need actual slicing
399 >>> list(_slicechunktosize(revlog, [0, 1], 3))
410 >>> list(_slicechunktosize(revlog, [0, 1], 3))
400 [[0], [1]]
411 [[0], [1]]
401 >>> list(_slicechunktosize(revlog, [1, 3], 3))
412 >>> list(_slicechunktosize(revlog, [1, 3], 3))
402 [[1], [3]]
413 [[1], [3]]
403 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
414 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
404 [[1, 2], [3]]
415 [[1, 2], [3]]
405 >>> list(_slicechunktosize(revlog, [3, 5], 3))
416 >>> list(_slicechunktosize(revlog, [3, 5], 3))
406 [[3], [5]]
417 [[3], [5]]
407 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
418 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
408 [[3], [5]]
419 [[3], [5]]
409 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
420 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
410 [[5], [6, 7, 8]]
421 [[5], [6, 7, 8]]
411 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
422 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
412 [[0], [1, 2], [3], [5], [6, 7, 8]]
423 [[0], [1, 2], [3], [5], [6, 7, 8]]
413
424
414 Case with too large individual chunk (must return valid chunk)
425 Case with too large individual chunk (must return valid chunk)
415 >>> list(_slicechunktosize(revlog, [0, 1], 2))
426 >>> list(_slicechunktosize(revlog, [0, 1], 2))
416 [[0], [1]]
427 [[0], [1]]
417 >>> list(_slicechunktosize(revlog, [1, 3], 1))
428 >>> list(_slicechunktosize(revlog, [1, 3], 1))
418 [[1], [3]]
429 [[1], [3]]
419 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
430 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
420 [[3], [5]]
431 [[3], [5]]
421 """
432 """
422 assert targetsize is None or 0 <= targetsize
433 assert targetsize is None or 0 <= targetsize
423 if targetsize is None or _segmentspan(revlog, revs) <= targetsize:
434 if targetsize is None or _segmentspan(revlog, revs) <= targetsize:
424 yield revs
435 yield revs
425 return
436 return
426
437
427 startrevidx = 0
438 startrevidx = 0
428 startdata = revlog.start(revs[0])
439 startdata = revlog.start(revs[0])
429 endrevidx = 0
440 endrevidx = 0
430 iterrevs = enumerate(revs)
441 iterrevs = enumerate(revs)
431 next(iterrevs) # skip first rev.
442 next(iterrevs) # skip first rev.
432 for idx, r in iterrevs:
443 for idx, r in iterrevs:
433 span = revlog.end(r) - startdata
444 span = revlog.end(r) - startdata
434 if span <= targetsize:
445 if span <= targetsize:
435 endrevidx = idx
446 endrevidx = idx
436 else:
447 else:
437 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
448 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
438 if chunk:
449 if chunk:
439 yield chunk
450 yield chunk
440 startrevidx = idx
451 startrevidx = idx
441 startdata = revlog.start(r)
452 startdata = revlog.start(r)
442 endrevidx = idx
453 endrevidx = idx
443 yield _trimchunk(revlog, revs, startrevidx)
454 yield _trimchunk(revlog, revs, startrevidx)
444
455
445 def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
456 def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
446 mingapsize=0):
457 mingapsize=0):
447 """slice revs to reduce the amount of unrelated data to be read from disk.
458 """slice revs to reduce the amount of unrelated data to be read from disk.
448
459
449 ``revs`` is sliced into groups that should be read in one time.
460 ``revs`` is sliced into groups that should be read in one time.
450 Assume that revs are sorted.
461 Assume that revs are sorted.
451
462
452 ``deltainfo`` is a _deltainfo instance of a revision that we would append
463 ``deltainfo`` is a _deltainfo instance of a revision that we would append
453 to the top of the revlog.
464 to the top of the revlog.
454
465
455 The initial chunk is sliced until the overall density (payload/chunks-span
466 The initial chunk is sliced until the overall density (payload/chunks-span
456 ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
467 ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
457 skipped.
468 skipped.
458
469
459 >>> revlog = _testrevlog([
470 >>> revlog = _testrevlog([
460 ... 5, #00 (5)
471 ... 5, #00 (5)
461 ... 10, #01 (5)
472 ... 10, #01 (5)
462 ... 12, #02 (2)
473 ... 12, #02 (2)
463 ... 12, #03 (empty)
474 ... 12, #03 (empty)
464 ... 27, #04 (15)
475 ... 27, #04 (15)
465 ... 31, #05 (4)
476 ... 31, #05 (4)
466 ... 31, #06 (empty)
477 ... 31, #06 (empty)
467 ... 42, #07 (11)
478 ... 42, #07 (11)
468 ... 47, #08 (5)
479 ... 47, #08 (5)
469 ... 47, #09 (empty)
480 ... 47, #09 (empty)
470 ... 48, #10 (1)
481 ... 48, #10 (1)
471 ... 51, #11 (3)
482 ... 51, #11 (3)
472 ... 74, #12 (23)
483 ... 74, #12 (23)
473 ... 85, #13 (11)
484 ... 85, #13 (11)
474 ... 86, #14 (1)
485 ... 86, #14 (1)
475 ... 91, #15 (5)
486 ... 91, #15 (5)
476 ... ])
487 ... ])
477
488
478 >>> list(_slicechunktodensity(revlog, list(range(16))))
489 >>> list(_slicechunktodensity(revlog, list(range(16))))
479 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
490 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
480 >>> list(_slicechunktodensity(revlog, [0, 15]))
491 >>> list(_slicechunktodensity(revlog, [0, 15]))
481 [[0], [15]]
492 [[0], [15]]
482 >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
493 >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
483 [[0], [11], [15]]
494 [[0], [11], [15]]
484 >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
495 >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
485 [[0], [11, 13, 15]]
496 [[0], [11, 13, 15]]
486 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
497 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
487 [[1, 2], [5, 8, 10, 11], [14]]
498 [[1, 2], [5, 8, 10, 11], [14]]
488 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
499 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
489 ... mingapsize=20))
500 ... mingapsize=20))
490 [[1, 2, 3, 5, 8, 10, 11], [14]]
501 [[1, 2, 3, 5, 8, 10, 11], [14]]
491 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
502 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
492 ... targetdensity=0.95))
503 ... targetdensity=0.95))
493 [[1, 2], [5], [8, 10, 11], [14]]
504 [[1, 2], [5], [8, 10, 11], [14]]
494 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
505 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
495 ... targetdensity=0.95, mingapsize=12))
506 ... targetdensity=0.95, mingapsize=12))
496 [[1, 2], [5, 8, 10, 11], [14]]
507 [[1, 2], [5, 8, 10, 11], [14]]
497 """
508 """
498 start = revlog.start
509 start = revlog.start
499 length = revlog.length
510 length = revlog.length
500
511
501 if len(revs) <= 1:
512 if len(revs) <= 1:
502 yield revs
513 yield revs
503 return
514 return
504
515
505 nextrev = len(revlog)
516 nextrev = len(revlog)
506 nextoffset = revlog.end(nextrev - 1)
517 nextoffset = revlog.end(nextrev - 1)
507
518
508 if deltainfo is None:
519 if deltainfo is None:
509 deltachainspan = _segmentspan(revlog, revs)
520 deltachainspan = _segmentspan(revlog, revs)
510 chainpayload = sum(length(r) for r in revs)
521 chainpayload = sum(length(r) for r in revs)
511 else:
522 else:
512 deltachainspan = deltainfo.distance
523 deltachainspan = deltainfo.distance
513 chainpayload = deltainfo.compresseddeltalen
524 chainpayload = deltainfo.compresseddeltalen
514
525
515 if deltachainspan < mingapsize:
526 if deltachainspan < mingapsize:
516 yield revs
527 yield revs
517 return
528 return
518
529
519 readdata = deltachainspan
530 readdata = deltachainspan
520
531
521 if deltachainspan:
532 if deltachainspan:
522 density = chainpayload / float(deltachainspan)
533 density = chainpayload / float(deltachainspan)
523 else:
534 else:
524 density = 1.0
535 density = 1.0
525
536
526 if density >= targetdensity:
537 if density >= targetdensity:
527 yield revs
538 yield revs
528 return
539 return
529
540
530 if deltainfo is not None:
541 if deltainfo is not None and deltainfo.deltalen:
531 revs = list(revs)
542 revs = list(revs)
532 revs.append(nextrev)
543 revs.append(nextrev)
533
544
534 # Store the gaps in a heap to have them sorted by decreasing size
545 # Store the gaps in a heap to have them sorted by decreasing size
535 gapsheap = []
546 gapsheap = []
536 heapq.heapify(gapsheap)
547 heapq.heapify(gapsheap)
537 prevend = None
548 prevend = None
538 for i, rev in enumerate(revs):
549 for i, rev in enumerate(revs):
539 if rev < nextrev:
550 if rev < nextrev:
540 revstart = start(rev)
551 revstart = start(rev)
541 revlen = length(rev)
552 revlen = length(rev)
542 else:
553 else:
543 revstart = nextoffset
554 revstart = nextoffset
544 revlen = deltainfo.deltalen
555 revlen = deltainfo.deltalen
545
556
546 # Skip empty revisions to form larger holes
557 # Skip empty revisions to form larger holes
547 if revlen == 0:
558 if revlen == 0:
548 continue
559 continue
549
560
550 if prevend is not None:
561 if prevend is not None:
551 gapsize = revstart - prevend
562 gapsize = revstart - prevend
552 # only consider holes that are large enough
563 # only consider holes that are large enough
553 if gapsize > mingapsize:
564 if gapsize > mingapsize:
554 heapq.heappush(gapsheap, (-gapsize, i))
565 heapq.heappush(gapsheap, (-gapsize, i))
555
566
556 prevend = revstart + revlen
567 prevend = revstart + revlen
557
568
558 # Collect the indices of the largest holes until the density is acceptable
569 # Collect the indices of the largest holes until the density is acceptable
559 indicesheap = []
570 indicesheap = []
560 heapq.heapify(indicesheap)
571 heapq.heapify(indicesheap)
561 while gapsheap and density < targetdensity:
572 while gapsheap and density < targetdensity:
562 oppgapsize, gapidx = heapq.heappop(gapsheap)
573 oppgapsize, gapidx = heapq.heappop(gapsheap)
563
574
564 heapq.heappush(indicesheap, gapidx)
575 heapq.heappush(indicesheap, gapidx)
565
576
566 # the gap sizes are stored as negatives to be sorted decreasingly
577 # the gap sizes are stored as negatives to be sorted decreasingly
567 # by the heap
578 # by the heap
568 readdata -= (-oppgapsize)
579 readdata -= (-oppgapsize)
569 if readdata > 0:
580 if readdata > 0:
570 density = chainpayload / float(readdata)
581 density = chainpayload / float(readdata)
571 else:
582 else:
572 density = 1.0
583 density = 1.0
573
584
574 # Cut the revs at collected indices
585 # Cut the revs at collected indices
575 previdx = 0
586 previdx = 0
576 while indicesheap:
587 while indicesheap:
577 idx = heapq.heappop(indicesheap)
588 idx = heapq.heappop(indicesheap)
578
589
579 chunk = _trimchunk(revlog, revs, previdx, idx)
590 chunk = _trimchunk(revlog, revs, previdx, idx)
580 if chunk:
591 if chunk:
581 yield chunk
592 yield chunk
582
593
583 previdx = idx
594 previdx = idx
584
595
585 chunk = _trimchunk(revlog, revs, previdx)
596 chunk = _trimchunk(revlog, revs, previdx)
586 if chunk:
597 if chunk:
587 yield chunk
598 yield chunk
588
599
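# A worked instance of the density test above, using the doctest revlog:
# the payload of [1, 2, 3, 5, 8, 10, 11, 14] is 5+2+0+4+5+1+3+1 = 21 bytes,
# while the span runs from start(1) == 5 to end(14) == 86, i.e. 81 bytes.
# 21/81 ~= 0.26 is below the default 0.5 target, which is why the doctest
# slices that input.
def _demo_density():
    rl = _testrevlog([5, 10, 12, 12, 27, 31, 31, 42, 47, 47, 48, 51, 74,
                      85, 86, 91])
    revs = [1, 2, 3, 5, 8, 10, 11, 14]
    payload = sum(rl.length(r) for r in revs)
    assert (payload, _segmentspan(rl, revs)) == (21, 81)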
589 @attr.s(slots=True, frozen=True)
600 @attr.s(slots=True, frozen=True)
590 class _deltainfo(object):
601 class _deltainfo(object):
591 distance = attr.ib()
602 distance = attr.ib()
592 deltalen = attr.ib()
603 deltalen = attr.ib()
593 data = attr.ib()
604 data = attr.ib()
594 base = attr.ib()
605 base = attr.ib()
595 chainbase = attr.ib()
606 chainbase = attr.ib()
596 chainlen = attr.ib()
607 chainlen = attr.ib()
597 compresseddeltalen = attr.ib()
608 compresseddeltalen = attr.ib()
598
609
599 class _deltacomputer(object):
610 class _deltacomputer(object):
600 def __init__(self, revlog):
611 def __init__(self, revlog):
601 self.revlog = revlog
612 self.revlog = revlog
602
613
603 def _getcandidaterevs(self, p1, p2, cachedelta):
614 def _getcandidaterevs(self, p1, p2, cachedelta):
604 """
615 """
605 Provides candidate revisions that are worth diffing against,
616 Provides candidate revisions that are worth diffing against,
606 grouped by how easy they are to try.
617 grouped by how easy they are to try.
607 """
618 """
608 revlog = self.revlog
619 revlog = self.revlog
609 gdelta = revlog._generaldelta
620 gdelta = revlog._generaldelta
610 curr = len(revlog)
621 curr = len(revlog)
611 prev = curr - 1
622 prev = curr - 1
612 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
623 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
613
624
614 # should we try to build a delta?
625 # should we try to build a delta?
615 if prev != nullrev and revlog.storedeltachains:
626 if prev != nullrev and revlog.storedeltachains:
616 tested = set()
627 tested = set()
617 # This condition is true most of the time when processing
628 # This condition is true most of the time when processing
618 # changegroup data into a generaldelta repo. The only time it
629 # changegroup data into a generaldelta repo. The only time it
619 # isn't true is if this is the first revision in a delta chain
630 # isn't true is if this is the first revision in a delta chain
620 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
631 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
621 if cachedelta and gdelta and revlog._lazydeltabase:
632 if cachedelta and gdelta and revlog._lazydeltabase:
622 # Assume what we received from the server is a good choice
633 # Assume what we received from the server is a good choice
623 # the delta build will reuse the cache
634 # the delta build will reuse the cache
624 yield (cachedelta[0],)
635 yield (cachedelta[0],)
625 tested.add(cachedelta[0])
636 tested.add(cachedelta[0])
626
637
627 if gdelta:
638 if gdelta:
628 # exclude any base already tested via the lazy delta path above
639 # exclude any base already tested via the lazy delta path above
629 parents = [p for p in (p1r, p2r)
640 parents = [p for p in (p1r, p2r)
630 if p != nullrev and p not in tested]
641 if p != nullrev and p not in tested]
631
642
632 if not revlog._deltabothparents and len(parents) == 2:
643 if not revlog._deltabothparents and len(parents) == 2:
633 parents.sort()
644 parents.sort()
634 # To minimize the chance of having to build a fulltext,
645 # To minimize the chance of having to build a fulltext,
635 # pick first whichever parent is closest to us (max rev)
646 # pick first whichever parent is closest to us (max rev)
636 yield (parents[1],)
647 yield (parents[1],)
637 # then the other one (min rev) if the first did not fit
648 # then the other one (min rev) if the first did not fit
638 yield (parents[0],)
649 yield (parents[0],)
639 tested.update(parents)
650 tested.update(parents)
640 elif len(parents) > 0:
651 elif len(parents) > 0:
641 # Test all parents (1 or 2), and keep the best candidate
652 # Test all parents (1 or 2), and keep the best candidate
642 yield parents
653 yield parents
643 tested.update(parents)
654 tested.update(parents)
644
655
645 if prev not in tested:
656 if prev not in tested:
646 # other approaches failed; try against prev to hopefully save us a
657 # other approaches failed; try against prev to hopefully save us a
647 # fulltext.
658 # fulltext.
648 yield (prev,)
659 yield (prev,)
649 tested.add(prev)
660 tested.add(prev)
650
661
651 def buildtext(self, revinfo, fh):
662 def buildtext(self, revinfo, fh):
652 """Builds a fulltext version of a revision
663 """Builds a fulltext version of a revision
653
664
654 revinfo: _revisioninfo instance that contains all needed info
665 revinfo: _revisioninfo instance that contains all needed info
655 fh: file handle to either the .i or the .d revlog file,
666 fh: file handle to either the .i or the .d revlog file,
656 depending on whether it is inlined or not
667 depending on whether it is inlined or not
657 """
668 """
658 btext = revinfo.btext
669 btext = revinfo.btext
659 if btext[0] is not None:
670 if btext[0] is not None:
660 return btext[0]
671 return btext[0]
661
672
662 revlog = self.revlog
673 revlog = self.revlog
663 cachedelta = revinfo.cachedelta
674 cachedelta = revinfo.cachedelta
664 flags = revinfo.flags
675 flags = revinfo.flags
665 node = revinfo.node
676 node = revinfo.node
666
677
667 baserev = cachedelta[0]
678 baserev = cachedelta[0]
668 delta = cachedelta[1]
679 delta = cachedelta[1]
669 # special case deltas which replace entire base; no need to decode
680 # special case deltas which replace entire base; no need to decode
670 # base revision. this neatly avoids censored bases, which throw when
681 # base revision. this neatly avoids censored bases, which throw when
671 # they're decoded.
682 # they're decoded.
672 hlen = struct.calcsize(">lll")
683 hlen = struct.calcsize(">lll")
673 if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
684 if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
674 len(delta) - hlen):
685 len(delta) - hlen):
675 btext[0] = delta[hlen:]
686 btext[0] = delta[hlen:]
676 else:
687 else:
677 # deltabase is rawtext before changed by flag processors, which is
688 # deltabase is rawtext before changed by flag processors, which is
678 # equivalent to non-raw text
689 # equivalent to non-raw text
679 basetext = revlog.revision(baserev, _df=fh, raw=False)
690 basetext = revlog.revision(baserev, _df=fh, raw=False)
680 btext[0] = mdiff.patch(basetext, delta)
691 btext[0] = mdiff.patch(basetext, delta)
681
692
682 try:
693 try:
683 res = revlog._processflags(btext[0], flags, 'read', raw=True)
694 res = revlog._processflags(btext[0], flags, 'read', raw=True)
684 btext[0], validatehash = res
695 btext[0], validatehash = res
685 if validatehash:
696 if validatehash:
686 revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
697 revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
687 if flags & REVIDX_ISCENSORED:
698 if flags & REVIDX_ISCENSORED:
688 raise RevlogError(_('node %s is not censored') % node)
699 raise RevlogError(_('node %s is not censored') % node)
689 except CensoredNodeError:
700 except CensoredNodeError:
690 # must pass the censored index flag to add censored revisions
701 # must pass the censored index flag to add censored revisions
691 if not flags & REVIDX_ISCENSORED:
702 if not flags & REVIDX_ISCENSORED:
692 raise
703 raise
693 return btext[0]
704 return btext[0]
694
705
695 def _builddeltadiff(self, base, revinfo, fh):
706 def _builddeltadiff(self, base, revinfo, fh):
696 revlog = self.revlog
707 revlog = self.revlog
697 t = self.buildtext(revinfo, fh)
708 t = self.buildtext(revinfo, fh)
698 if revlog.iscensored(base):
709 if revlog.iscensored(base):
699 # deltas based on a censored revision must replace the
710 # deltas based on a censored revision must replace the
700 # full content in one patch, so delta works everywhere
711 # full content in one patch, so delta works everywhere
701 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
712 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
702 delta = header + t
713 delta = header + t
703 else:
714 else:
704 ptext = revlog.revision(base, _df=fh, raw=True)
715 ptext = revlog.revision(base, _df=fh, raw=True)
705 delta = mdiff.textdiff(ptext, t)
716 delta = mdiff.textdiff(ptext, t)
706
717
707 return delta
718 return delta
708
719
709 def _builddeltainfo(self, revinfo, base, fh):
720 def _builddeltainfo(self, revinfo, base, fh):
710 # can we use the cached delta?
721 # can we use the cached delta?
711 if revinfo.cachedelta and revinfo.cachedelta[0] == base:
722 if revinfo.cachedelta and revinfo.cachedelta[0] == base:
712 delta = revinfo.cachedelta[1]
723 delta = revinfo.cachedelta[1]
713 else:
724 else:
714 delta = self._builddeltadiff(base, revinfo, fh)
725 delta = self._builddeltadiff(base, revinfo, fh)
715 revlog = self.revlog
726 revlog = self.revlog
716 header, data = revlog.compress(delta)
727 header, data = revlog.compress(delta)
717 deltalen = len(header) + len(data)
728 deltalen = len(header) + len(data)
718 chainbase = revlog.chainbase(base)
729 chainbase = revlog.chainbase(base)
719 offset = revlog.end(len(revlog) - 1)
730 offset = revlog.end(len(revlog) - 1)
720 dist = deltalen + offset - revlog.start(chainbase)
731 dist = deltalen + offset - revlog.start(chainbase)
721 if revlog._generaldelta:
732 if revlog._generaldelta:
722 deltabase = base
733 deltabase = base
723 else:
734 else:
724 deltabase = chainbase
735 deltabase = chainbase
725 chainlen, compresseddeltalen = revlog._chaininfo(base)
736 chainlen, compresseddeltalen = revlog._chaininfo(base)
726 chainlen += 1
737 chainlen += 1
727 compresseddeltalen += deltalen
738 compresseddeltalen += deltalen
728 return _deltainfo(dist, deltalen, (header, data), deltabase,
739 return _deltainfo(dist, deltalen, (header, data), deltabase,
729 chainbase, chainlen, compresseddeltalen)
740 chainbase, chainlen, compresseddeltalen)
730
741
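# A short gloss on `dist` above: `offset` is where the new delta would land
# (the current end of the data file), so `dist` measures the byte span from
# the start of the chain's base snapshot through the end of the would-be
# delta -- the `distance` consulted by the density slicing above and by the
# `_isgooddeltainfo` check in finddeltainfo below.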
731 def finddeltainfo(self, revinfo, fh):
742 def finddeltainfo(self, revinfo, fh):
732 """Find an acceptable delta against a candidate revision
743 """Find an acceptable delta against a candidate revision
733
744
734 revinfo: information about the revision (instance of _revisioninfo)
745 revinfo: information about the revision (instance of _revisioninfo)
735 fh: file handle to either the .i or the .d revlog file,
746 fh: file handle to either the .i or the .d revlog file,
736 depending on whether it is inlined or not
747 depending on whether it is inlined or not
737
748
738 Returns the first acceptable candidate revision, as ordered by
749 Returns the first acceptable candidate revision, as ordered by
739 _getcandidaterevs
750 _getcandidaterevs
740 """
751 """
741 if not revinfo.textlen:
752 if not revinfo.textlen:
742 return None # empty files do not need deltas
753 return None # empty files do not need deltas
743
754
744 cachedelta = revinfo.cachedelta
755 cachedelta = revinfo.cachedelta
745 p1 = revinfo.p1
756 p1 = revinfo.p1
746 p2 = revinfo.p2
757 p2 = revinfo.p2
747 revlog = self.revlog
758 revlog = self.revlog
748
759
749 deltalength = self.revlog.length
760 deltalength = self.revlog.length
750 deltaparent = self.revlog.deltaparent
761 deltaparent = self.revlog.deltaparent
751
762
752 deltainfo = None
763 deltainfo = None
753 deltas_limit = revinfo.textlen * LIMIT_DELTA2TEXT
764 deltas_limit = revinfo.textlen * LIMIT_DELTA2TEXT
754 for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
765 for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
755 # filter out delta bases that will never produce a good delta
766 # filter out delta bases that will never produce a good delta
756 candidaterevs = [r for r in candidaterevs
767 candidaterevs = [r for r in candidaterevs
757 if self.revlog.length(r) <= deltas_limit]
768 if self.revlog.length(r) <= deltas_limit]
758 nominateddeltas = []
769 nominateddeltas = []
759 for candidaterev in candidaterevs:
770 for candidaterev in candidaterevs:
760 # skip over empty deltas (no need to include them in a chain)
771 # skip over empty deltas (no need to include them in a chain)
761 while candidaterev != nullrev and not deltalength(candidaterev):
772 while candidaterev != nullrev and not deltalength(candidaterev):
762 candidaterev = deltaparent(candidaterev)
773 candidaterev = deltaparent(candidaterev)
763 # no need to try a delta against nullid, this will be handled
774 # no need to try a delta against nullid, this will be handled
764 # by fulltext later.
775 # by fulltext later.
765 if candidaterev == nullrev:
776 if candidaterev == nullrev:
766 continue
777 continue
767 # no delta for rawtext-changing revs (see "candelta" for why)
778 # no delta for rawtext-changing revs (see "candelta" for why)
768 if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
779 if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
769 continue
780 continue
770 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
781 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
771 if revlog._isgooddeltainfo(candidatedelta, revinfo):
782 if revlog._isgooddeltainfo(candidatedelta, revinfo):
772 nominateddeltas.append(candidatedelta)
783 nominateddeltas.append(candidatedelta)
773 if nominateddeltas:
784 if nominateddeltas:
774 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
785 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
775 break
786 break
776
787
777 return deltainfo
788 return deltainfo
778
789
779 @attr.s(slots=True, frozen=True)
790 @attr.s(slots=True, frozen=True)
780 class _revisioninfo(object):
791 class _revisioninfo(object):
781 """Information about a revision that allows building its fulltext
792 """Information about a revision that allows building its fulltext
782 node: expected hash of the revision
793 node: expected hash of the revision
783 p1, p2: parent revs of the revision
794 p1, p2: parent revs of the revision
784 btext: built text cache consisting of a one-element list
795 btext: built text cache consisting of a one-element list
785 cachedelta: (baserev, uncompressed_delta) or None
796 cachedelta: (baserev, uncompressed_delta) or None
786 flags: flags associated to the revision storage
797 flags: flags associated to the revision storage
787
798
788 One of btext[0] or cachedelta must be set.
799 One of btext[0] or cachedelta must be set.
789 """
800 """
790 node = attr.ib()
801 node = attr.ib()
791 p1 = attr.ib()
802 p1 = attr.ib()
792 p2 = attr.ib()
803 p2 = attr.ib()
793 btext = attr.ib()
804 btext = attr.ib()
794 textlen = attr.ib()
805 textlen = attr.ib()
795 cachedelta = attr.ib()
806 cachedelta = attr.ib()
796 flags = attr.ib()
807 flags = attr.ib()
797
808
798 # index v0:
809 # index v0:
799 # 4 bytes: offset
810 # 4 bytes: offset
800 # 4 bytes: compressed length
811 # 4 bytes: compressed length
801 # 4 bytes: base rev
812 # 4 bytes: base rev
802 # 4 bytes: link rev
813 # 4 bytes: link rev
803 # 20 bytes: parent 1 nodeid
814 # 20 bytes: parent 1 nodeid
804 # 20 bytes: parent 2 nodeid
815 # 20 bytes: parent 2 nodeid
805 # 20 bytes: nodeid
816 # 20 bytes: nodeid
806 indexformatv0 = struct.Struct(">4l20s20s20s")
817 indexformatv0 = struct.Struct(">4l20s20s20s")
807 indexformatv0_pack = indexformatv0.pack
818 indexformatv0_pack = indexformatv0.pack
808 indexformatv0_unpack = indexformatv0.unpack
819 indexformatv0_unpack = indexformatv0.unpack
809
820
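# A quick sanity sketch of the v0 record layout documented above: four
# big-endian longs plus three 20-byte nodes make a fixed 76-byte entry.
def _demo_indexv0():
    assert indexformatv0.size == 4 * 4 + 3 * 20  # 76 bytes
    entry = indexformatv0_pack(0, 11, 0, 0,
                               b'\x11' * 20, b'\x00' * 20, b'\xaa' * 20)
    offset, clen, base, link = indexformatv0_unpack(entry)[:4]
    assert (offset, clen, base, link) == (0, 11, 0, 0)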
810 class revlogoldindex(list):
821 class revlogoldindex(list):
811 def __getitem__(self, i):
822 def __getitem__(self, i):
812 if i == -1:
823 if i == -1:
813 return (0, 0, 0, -1, -1, -1, -1, nullid)
824 return (0, 0, 0, -1, -1, -1, -1, nullid)
814 return list.__getitem__(self, i)
825 return list.__getitem__(self, i)
815
826
816 # maximum <delta-chain-data>/<revision-text-length> ratio
827 # maximum <delta-chain-data>/<revision-text-length> ratio
817 LIMIT_DELTA2TEXT = 2
828 LIMIT_DELTA2TEXT = 2
818
829
819 class revlogoldio(object):
830 class revlogoldio(object):
820 def __init__(self):
831 def __init__(self):
821 self.size = indexformatv0.size
832 self.size = indexformatv0.size
822
833
823 def parseindex(self, data, inline):
834 def parseindex(self, data, inline):
824 s = self.size
835 s = self.size
825 index = []
836 index = []
826 nodemap = {nullid: nullrev}
837 nodemap = {nullid: nullrev}
827 n = off = 0
838 n = off = 0
828 l = len(data)
839 l = len(data)
829 while off + s <= l:
840 while off + s <= l:
830 cur = data[off:off + s]
841 cur = data[off:off + s]
831 off += s
842 off += s
832 e = indexformatv0_unpack(cur)
843 e = indexformatv0_unpack(cur)
833 # transform to revlogv1 format
844 # transform to revlogv1 format
834 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
845 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
835 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
846 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
836 index.append(e2)
847 index.append(e2)
837 nodemap[e[6]] = n
848 nodemap[e[6]] = n
838 n += 1
849 n += 1
839
850
840 return revlogoldindex(index), nodemap, None
851 return revlogoldindex(index), nodemap, None
841
852
842 def packentry(self, entry, node, version, rev):
853 def packentry(self, entry, node, version, rev):
843 if gettype(entry[0]):
854 if gettype(entry[0]):
844 raise RevlogError(_('index entry flags need revlog version 1'))
855 raise RevlogError(_('index entry flags need revlog version 1'))
845 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
856 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
846 node(entry[5]), node(entry[6]), entry[7])
857 node(entry[5]), node(entry[6]), entry[7])
847 return indexformatv0_pack(*e2)
858 return indexformatv0_pack(*e2)
848
859
849 # index ng:
860 # index ng:
850 # 6 bytes: offset
861 # 6 bytes: offset
851 # 2 bytes: flags
862 # 2 bytes: flags
852 # 4 bytes: compressed length
863 # 4 bytes: compressed length
853 # 4 bytes: uncompressed length
864 # 4 bytes: uncompressed length
854 # 4 bytes: base rev
865 # 4 bytes: base rev
855 # 4 bytes: link rev
866 # 4 bytes: link rev
856 # 4 bytes: parent 1 rev
867 # 4 bytes: parent 1 rev
857 # 4 bytes: parent 2 rev
868 # 4 bytes: parent 2 rev
858 # 32 bytes: nodeid
869 # 32 bytes: nodeid
859 indexformatng = struct.Struct(">Qiiiiii20s12x")
870 indexformatng = struct.Struct(">Qiiiiii20s12x")
860 indexformatng_pack = indexformatng.pack
871 indexformatng_pack = indexformatng.pack
861 versionformat = struct.Struct(">I")
872 versionformat = struct.Struct(">I")
862 versionformat_pack = versionformat.pack
873 versionformat_pack = versionformat.pack
863 versionformat_unpack = versionformat.unpack
874 versionformat_unpack = versionformat.unpack
864
875
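# A sketch of the "index ng" layout above: 8 + 6*4 + 20 + 12 padding bytes
# make a fixed 64-byte entry, with the 8-byte field carrying a 6-byte offset
# plus 2 bytes of flags (see offset_type()). For rev 0, packentry() below
# overwrites the first 4 bytes with the revlog version header.
def _demo_indexng():
    assert indexformatng.size == 64
    entry = (offset_type(0, 0), 11, 21, 0, 0, -1, -1, b'\xaa' * 20)
    assert len(indexformatng_pack(*entry)) == 64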
865 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
876 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
866 # signed integer)
877 # signed integer)
867 _maxentrysize = 0x7fffffff
878 _maxentrysize = 0x7fffffff
868
879
869 class revlogio(object):
880 class revlogio(object):
870 def __init__(self):
881 def __init__(self):
871 self.size = indexformatng.size
882 self.size = indexformatng.size
872
883
873 def parseindex(self, data, inline):
884 def parseindex(self, data, inline):
874 # call the C implementation to parse the index data
885 # call the C implementation to parse the index data
875 index, cache = parsers.parse_index2(data, inline)
886 index, cache = parsers.parse_index2(data, inline)
876 return index, getattr(index, 'nodemap', None), cache
887 return index, getattr(index, 'nodemap', None), cache
877
888
878 def packentry(self, entry, node, version, rev):
889 def packentry(self, entry, node, version, rev):
879 p = indexformatng_pack(*entry)
890 p = indexformatng_pack(*entry)
880 if rev == 0:
891 if rev == 0:
881 p = versionformat_pack(version) + p[4:]
892 p = versionformat_pack(version) + p[4:]
882 return p
893 return p
883
894
884 class revlog(object):
895 class revlog(object):
885 """
896 """
886 the underlying revision storage object
897 the underlying revision storage object
887
898
888 A revlog consists of two parts, an index and the revision data.
899 A revlog consists of two parts, an index and the revision data.
889
900
890 The index is a file with a fixed record size containing
901 The index is a file with a fixed record size containing
891 information on each revision, including its nodeid (hash), the
902 information on each revision, including its nodeid (hash), the
892 nodeids of its parents, the position and offset of its data within
903 nodeids of its parents, the position and offset of its data within
893 the data file, and the revision it's based on. Finally, each entry
904 the data file, and the revision it's based on. Finally, each entry
894 contains a linkrev entry that can serve as a pointer to external
905 contains a linkrev entry that can serve as a pointer to external
895 data.
906 data.
896
907
897 The revision data itself is a linear collection of data chunks.
908 The revision data itself is a linear collection of data chunks.
898 Each chunk represents a revision and is usually represented as a
909 Each chunk represents a revision and is usually represented as a
899 delta against the previous chunk. To bound lookup time, runs of
910 delta against the previous chunk. To bound lookup time, runs of
900 deltas are limited to about 2 times the length of the original
911 deltas are limited to about 2 times the length of the original
901 version data. This makes retrieval of a version proportional to
912 version data. This makes retrieval of a version proportional to
902 its size, or O(1) relative to the number of revisions.
913 its size, or O(1) relative to the number of revisions.
903
914
904 Both pieces of the revlog are written to in an append-only
915 Both pieces of the revlog are written to in an append-only
905 fashion, which means we never need to rewrite a file to insert or
916 fashion, which means we never need to rewrite a file to insert or
906 remove data, and can use some simple techniques to avoid the need
917 remove data, and can use some simple techniques to avoid the need
907 for locking while reading.
918 for locking while reading.
908
919
909 If checkambig, indexfile is opened with checkambig=True at
920 If checkambig, indexfile is opened with checkambig=True at
910 writing, to avoid file stat ambiguity.
921 writing, to avoid file stat ambiguity.
911
922
912 If mmaplargeindex is True, and an mmapindexthreshold is set, the
923 If mmaplargeindex is True, and an mmapindexthreshold is set, the
913 index will be mmapped rather than read if it is larger than the
924 index will be mmapped rather than read if it is larger than the
914 configured threshold.
925 configured threshold.
915
926
916 If censorable is True, the revlog can have censored revisions.
927 If censorable is True, the revlog can have censored revisions.
917 """
928 """
918 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
929 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
919 mmaplargeindex=False, censorable=False):
930 mmaplargeindex=False, censorable=False):
920 """
931 """
921 create a revlog object
932 create a revlog object
922
933
923 opener is a function that abstracts the file opening operation
934 opener is a function that abstracts the file opening operation
924 and can be used to implement COW semantics or the like.
935 and can be used to implement COW semantics or the like.
925 """
936 """
926 self.indexfile = indexfile
937 self.indexfile = indexfile
927 self.datafile = datafile or (indexfile[:-2] + ".d")
938 self.datafile = datafile or (indexfile[:-2] + ".d")
928 self.opener = opener
939 self.opener = opener
929 # When True, indexfile is opened with checkambig=True at writing, to
940 # When True, indexfile is opened with checkambig=True at writing, to
930 # avoid file stat ambiguity.
941 # avoid file stat ambiguity.
931 self._checkambig = checkambig
942 self._checkambig = checkambig
932 self._censorable = censorable
943 self._censorable = censorable
933 # 3-tuple of (node, rev, text) for a raw revision.
944 # 3-tuple of (node, rev, text) for a raw revision.
934 self._cache = None
945 self._cache = None
935 # Maps rev to chain base rev.
946 # Maps rev to chain base rev.
936 self._chainbasecache = util.lrucachedict(100)
947 self._chainbasecache = util.lrucachedict(100)
937 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
948 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
938 self._chunkcache = (0, '')
949 self._chunkcache = (0, '')
939 # How much data to read and cache into the raw revlog data cache.
950 # How much data to read and cache into the raw revlog data cache.
940 self._chunkcachesize = 65536
951 self._chunkcachesize = 65536
941 self._maxchainlen = None
952 self._maxchainlen = None
942 self._deltabothparents = True
953 self._deltabothparents = True
943 self.index = []
954 self.index = []
944 # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        self._compengine = 'zlib'
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        mmapindexthreshold = None
        v = REVLOG_DEFAULT_VERSION
        opts = getattr(opener, 'options', None)
        if opts is not None:
            if 'revlogv2' in opts:
                # version 2 revlogs always use generaldelta.
                v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
            elif 'revlogv1' in opts:
                if 'generaldelta' in opts:
                    v |= FLAG_GENERALDELTA
            else:
                v = 0
            if 'chunkcachesize' in opts:
                self._chunkcachesize = opts['chunkcachesize']
            if 'maxchainlen' in opts:
                self._maxchainlen = opts['maxchainlen']
            if 'deltabothparents' in opts:
                self._deltabothparents = opts['deltabothparents']
            self._lazydeltabase = bool(opts.get('lazydeltabase', False))
            if 'compengine' in opts:
                self._compengine = opts['compengine']
            if 'maxdeltachainspan' in opts:
                self._maxdeltachainspan = opts['maxdeltachainspan']
            if mmaplargeindex and 'mmapindexthreshold' in opts:
                mmapindexthreshold = opts['mmapindexthreshold']
            self._sparserevlog = bool(opts.get('sparse-revlog', False))
            withsparseread = bool(opts.get('with-sparse-read', False))
            # sparse-revlog forces sparse-read
            self._withsparseread = self._sparserevlog or withsparseread
            if 'sparse-read-density-threshold' in opts:
                self._srdensitythreshold = opts['sparse-read-density-threshold']
            if 'sparse-read-min-gap-size' in opts:
                self._srmingapsize = opts['sparse-read-min-gap-size']

        if self._chunkcachesize <= 0:
            raise RevlogError(_('revlog chunk cache size %r is not greater '
                                'than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise RevlogError(_('revlog chunk cache size %r is not a power '
                                'of 2') % self._chunkcachesize)

        indexdata = ''
        self._initempty = True
        try:
            with self._indexfp() as f:
                if (mmapindexthreshold is not None and
                        self.opener.fstat(f).st_size >= mmapindexthreshold):
                    indexdata = util.buffer(util.mmapread(f))
                else:
                    indexdata = f.read()
            if len(indexdata) > 0:
                v = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & FLAG_INLINE_DATA
        self._generaldelta = v & FLAG_GENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0:
            if flags:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        else:
            raise RevlogError(_('unknown version (%d) in revlog %s') %
                              (fmt, self.indexfile))

        self.storedeltachains = True

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def _compressor(self):
        return util.compengines[self._compengine].revlogcompressor()

    def _indexfp(self, mode='r'):
        """file object for the revlog's index file"""
        args = {r'mode': mode}
        if mode != 'r':
            args[r'checkambig'] = self._checkambig
        if mode == 'w':
            args[r'atomictemp'] = True
        return self.opener(self.indexfile, **args)

    def _datafp(self, mode='r'):
        """file object for the revlog's data file"""
        return self.opener(self.datafile, mode=mode)

    @contextlib.contextmanager
    def _datareadfp(self, existingfp=None):
        """file object suitable to read data"""
        if existingfp is not None:
            yield existingfp
        else:
            if self._inline:
                func = self._indexfp
            else:
                func = self._datafp
            with func() as fp:
                yield fp

    def tip(self):
        return self.node(len(self.index) - 1)
    def __contains__(self, rev):
        return 0 <= rev < len(self)
    def __len__(self):
        return len(self.index)
    def __iter__(self):
        return iter(pycompat.xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        step = 1
        length = len(self)
        if stop is not None:
            if start > stop:
                step = -1
            stop += step
            if stop > length:
                stop = length
        else:
            stop = length
        return pycompat.xrange(start, stop, step)

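    # Illustrative sketch for ``revs`` (hypothetical revlog ``rl`` with five
    # revisions): ``list(rl.revs())`` gives [0, 1, 2, 3, 4], while
    # ``list(rl.revs(3, 1))`` walks backwards and gives [3, 2, 1]; the
    # ``stop`` bound is inclusive because ``stop`` is shifted by ``step``
    # before the range is built.
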
    @util.propertycache
    def nodemap(self):
        if self.index:
            # populate mapping down to the initial node
            node0 = self.index[0][7]  # get around changelog filtering
            self.rev(node0)
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such a flag processor can
        # alter the rawtext content that the delta will be based on, and two
        # clients could have the same revlog node with different flags (i.e.
        # different rawtext contents) and the delta could be incompatible.
        if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
            or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
            return False
        return True

    def clearcaches(self):
        self._cache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, '')
        self._pcache = {}

        try:
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 1
            else:
                assert p < len(i)
            for r in pycompat.xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

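    # Illustrative sketch of the packing described above (hypothetical
    # values): an entry whose data starts at byte offset 1000 with flags 0x1
    # stores (1000 << 16) | 0x1 == 65536001 in index[rev][0], so ``start``
    # recovers 65536001 >> 16 == 1000 and ``flags`` recovers
    # 65536001 & 0xFFFF == 0x1.
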
    def length(self, rev):
        return self.index[rev][1]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(rev, raw=True)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        return entry[5], entry[6]

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7]  # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

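    # Illustrative sketch for ``_deltachain`` (hypothetical index contents):
    # with generaldelta, if index[9][3] == 5, index[5][3] == 2 and
    # index[2][3] == 2 (a self-referencing base), then ``_deltachain(9)``
    # returns ([2, 5, 9], False), while ``_deltachain(9, stoprev=5)``
    # returns ([9], True) because the walk stops before appending the stop
    # revision.
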
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse topological order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
                                      inclusive=inclusive)

    def descendants(self, revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

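    # Illustrative sketch for ``descendants`` (hypothetical linear revlog
    # 0 <- 1 <- 2): ``descendants([0])`` yields 1 then 2 but not 0 itself,
    # and ``descendants([nullrev])`` yields every revision.
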
    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        return ancestor.incrementalmissingancestors(self.parentrevs, common)

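    # Illustrative usage sketch (hypothetical revlog ``rl`` and revision
    # numbers ``c`` and ``h``), mirroring how findmissingrevs below uses
    # this object:
    #   inc = rl.incrementalmissingrevs(common=[c])
    #   inc.missingancestors([h])  # revs in ::h but not in ::c
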
    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

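    # Illustrative sketch for ``findmissing`` (hypothetical linear DAG
    # a <- b <- c): ``findmissing(common=[a], heads=[c])`` returns the nodes
    # of b and c, since both are ancestors of c but not of a.
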
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

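    # Illustrative sketch for ``nodesbetween`` (hypothetical linear DAG
    # a <- b <- c): ``nodesbetween([a], [c])`` returns ([a, b, c], [a], [c]),
    # since every node counts as a descendant and an ancestor of itself.
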
    def headrevs(self):
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

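    # Illustrative walk-through of ``_headrevs`` (hypothetical linear revlog
    # 0 <- 1 <- 2): ``ishead`` has count + 1 slots so that writes indexed by
    # nullrev (-1) land in the spare final slot; after the loop ``ishead``
    # is [0, 0, 1, 0] and the result is [2].
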
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

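    # Illustrative sketch for ``children`` (hypothetical): since nullrev is
    # filtered out of ``prevs``, ``children(nullid)`` falls into the
    # ``elif p == nullrev`` branch for parentless revisions and therefore
    # returns the root nodes of the revlog.
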
    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
        elif a > b:
            return False
        return a in self._commonancestorsheads(a, b)

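    # Note on the ``a > b`` fast path above: revision numbers are assigned
    # in topological order, so an ancestor can never have a higher revision
    # number than its descendant; e.g. ``isancestorrev(5, 3)`` is False
    # without consulting _commonancestorsheads (hypothetical revisions).
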
    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if "%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

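    # Illustrative sketch for ``_match`` (hypothetical 10-revision revlog):
    # the str(rev) branch accepts negative numbers, so an id of "-1" maps to
    # len(self) + rev == 9, the tip; an out-of-range value raises ValueError
    # internally, is swallowed, and the method falls through to return None.
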
    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be full
        # hashes
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise AmbiguousPrefixLookupError(id, self.indexfile,
                                                 _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2  # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise AmbiguousPrefixLookupError(id, self.indexfile,
                                                     _('ambiguous identifier'))
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass

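    # Illustrative sketch for ``_partialmatch`` (hypothetical prefix "d4"):
    # the C radix tree is tried first; in pure Python, or when an ambiguous
    # prefix may be hidden by filtering, the linear scan above unhexlifies
    # the even-length part of "d4" and compares it against every node in
    # the index, caching a unique answer in _pcache.
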
1843 def lookup(self, id):
1854 def lookup(self, id):
1844 """locate a node based on:
1855 """locate a node based on:
1845 - revision number or str(revision number)
1856 - revision number or str(revision number)
1846 - nodeid or subset of hex nodeid
1857 - nodeid or subset of hex nodeid
1847 """
1858 """
1848 n = self._match(id)
1859 n = self._match(id)
1849 if n is not None:
1860 if n is not None:
1850 return n
1861 return n
1851 n = self._partialmatch(id)
1862 n = self._partialmatch(id)
1852 if n:
1863 if n:
1853 return n
1864 return n
1854
1865
1855 raise LookupError(id, self.indexfile, _('no match found'))
1866 raise LookupError(id, self.indexfile, _('no match found'))
1856
1867
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""
        def isvalid(prefix):
            try:
                node = self._partialmatch(prefix)
            except error.RevlogError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if node is None:
                raise LookupError(node, self.indexfile, _('no node'))
            return True

        def maybewdir(prefix):
            return all(c == 'f' for c in prefix)

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except RevlogError:
                if node != wdirid:
                    raise LookupError(node, self.indexfile, _('no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

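    # Strategy of shortest() above: on an unfiltered revlog, defer to the
    # index's shortest() fast path when it exists (the AttributeError case
    # falls through to the pure-Python probing below). Otherwise prefixes
    # are grown one hex digit at a time until _partialmatch finds them
    # unambiguous, with disambiguate() only guarding against a collision
    # with the all-'f' virtual working-directory id.
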
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

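    # Design note on cmp() above: instead of reconstructing the stored
    # revision and comparing bytes, it hashes the candidate text with the
    # stored parents and compares the result against the node, avoiding
    # materializing the revision text at all.
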
    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)
        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            return util.buffer(d, offset - realoffset, length)
        return d

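    # Worked example for the window arithmetic in _readsegment above
    # (illustrative numbers, not taken from the source): with
    # cachesize = 65536, a request for offset = 70000, length = 100 gives
    #   realoffset = 70000 & ~65535 = 65536
    #   reallength = ((70000 + 100 + 65536) & ~65535) - 65536 = 65536
    # so one aligned 64 KiB window containing the requested range is read
    # and cached, and util.buffer() slices the 100 requested bytes out of
    # it without copying.
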
    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)

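    # Note on the offset math in _getsegmentforrevs above: the first field
    # of an index entry packs the data offset and the revision flags into
    # one integer (offset in the high bits, flags in the low 16 bits),
    # hence the ``>> 16`` to recover the offset. For inline revlogs, index
    # entries and data are interleaved in the same file, so the physical
    # position of revision ``rev``'s data is shifted by ``(rev + 1)`` index
    # entries of ``self._io.size`` bytes each.
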
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = _slicechunk(self, revs, targetsize=targetsize)

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l

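    # Design note on _chunks above: bound methods and module attributes
    # (self.start, self.length, util.buffer, l.append, self.decompress)
    # are hoisted into locals before the loops. In CPython, local variable
    # lookups are cheaper than repeated attribute lookups, which matters
    # in a hot path that may touch thousands of revisions per call.
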
    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

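    # Reading deltaparent above: index field 3 stores the delta base
    # revision. A base equal to the revision itself marks a full snapshot
    # (no delta parent, hence nullrev). With generaldelta the stored base
    # may be any earlier revision; without it, deltas are implicitly
    # against the immediately preceding revision (rev - 1).
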
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        flags = None
        rawtext = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                # _cache only stores rawtext
                if raw:
                    return self._cache[2]
                # duplicated, but good for perf
                if rev is None:
                    rev = self.rev(node)
                if flags is None:
                    flags = self.flags(rev)
                # no extra flags set, no flag processor runs, text = rawtext
                if flags == REVIDX_DEFAULT_FLAGS:
                    return self._cache[2]
                # rawtext is reusable. need to run flag processor
                rawtext = self._cache[2]

            cachedrev = self._cache[1]

        # look up what we need to read
        if rawtext is None:
            if rev is None:
                rev = self.rev(node)

            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
            if stopped:
                rawtext = self._cache[2]

            # drop cache to save memory
            self._cache = None

            targetsize = None
            rawsize = self.index[rev][2]
            if 0 <= rawsize:
                targetsize = 4 * rawsize

            bins = self._chunks(chain, df=_df, targetsize=targetsize)
            if rawtext is None:
                rawtext = bytes(bins[0])
                bins = bins[1:]

            rawtext = mdiff.patches(rawtext, bins)
            self._cache = (node, rev, rawtext)

        if flags is None:
            if rev is None:
                rev = self.rev(node)
            flags = self.flags(rev)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

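    # Reconstruction pipeline in revision() above, in order: resolve the
    # node/rev pair, try the single-entry (node, rev, rawtext) cache, walk
    # the delta chain (stopping early if the cached rawtext can serve as a
    # base), fetch and decompress the chunks, fold the deltas together
    # with mdiff.patches(), run the registered flag processors, and
    # finally verify the hash unless a processor vouched for it.
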
    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return hash(text, p1, p2)

    def _processflags(self, text, flags, operation, raw=False):
        """Inspect revision data flags and apply transforms defined by
        registered flag processors.

        ``text`` - the revision data to process
        ``flags`` - the revision flags
        ``operation`` - the operation being performed (read or write)
        ``raw`` - an optional argument describing if the raw transform should be
        applied.

        This method processes the flags in the order (or reverse order if
        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
        flag processors registered for present flags. The order of flags defined
        in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.

        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
        processed text and ``validatehash`` is a bool indicating whether the
        returned text should be checked for hash integrity.

        Note: If the ``raw`` argument is set, it has precedence over the
        operation and will only update the value of ``validatehash``.
        """
        # fast path: no flag processors will run
        if flags == 0:
            return text, True
        if operation not in ('read', 'write'):
            raise ProgrammingError(_("invalid '%s' operation") % (operation))
        # Check all flags are known.
        if flags & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_("incompatible revision flag '%#x'") %
                              (flags & ~REVIDX_KNOWN_FLAGS))
        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in _flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise RevlogError(message)

                processor = _flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                    validatehash = validatehash and vhash

        return text, validatehash

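    # Shape of a flag processor entry as consumed by _processflags above
    # (a sketch with hypothetical names): each registered processor is a
    # 3-tuple of callables,
    #   _flagprocessors[SOMEFLAG] = (readfn, writefn, rawfn)
    # where readfn(self, text) and writefn(self, text) each return
    # (newtext, validatehash), and rawfn(self, text) returns only the
    # validatehash boolean used for raw reads.
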
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise RevlogError(_("integrity check failed on %s:%s")
                                  % (self.indexfile, pycompat.bytestr(revornode)))
        except RevlogError:
            if self._censorable and _censoredtext(text):
                raise error.CensoredNodeError(self.indexfile, node, text)
            raise

    def _enforceinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        if (not self._inline or
            (self.start(tiprev) + self.length(tiprev)) < _maxinline):
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(tiprev)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        with self._datafp('w') as df:
            for r in self:
                df.write(self._getsegmentforrevs(r, r)[1])

        with self._indexfp('w') as fp:
            self.version &= ~FLAG_INLINE_DATA
            self._inline = False
            io = self._io
            for i in self:
                e = io.packentry(self.index[i], self.node, self.version, i)
                fp.write(e)

            # the temp file replaces the real index when we exit the context
            # manager

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

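    # Conversion sketch for _enforceinlinesize above: once the inline
    # index file grows past _maxinline, every revision's raw segment is
    # copied into a fresh data file, the index is rewritten without
    # FLAG_INLINE_DATA, and both files are registered with the transaction
    # (tr.add/tr.replace) so the switch can be rolled back cleanly if the
    # transaction aborts.
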
    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
        computed by default as hash(text, p1, p2), however subclasses might
        use a different hashing method (and override checkhash() in such a case)
        flags - the known flags to set on the revision
        deltacomputer - an optional _deltacomputer instance shared between
        multiple calls
        """
        if link == nullrev:
            raise RevlogError(_("attempted to add linkrev -1 to %s")
                              % self.indexfile)

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(rawtext)))

        node = node or self.hash(rawtext, p1, p2)
        if node in self.nodemap:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
                                   flags, cachedelta=cachedelta,
                                   deltacomputer=deltacomputer)

    def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
                       cachedelta=None, deltacomputer=None):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (e.g.
        received over the wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            dfh = self._datafp("a+")
        ifh = self._indexfp("a+")
        try:
            return self._addrevision(node, rawtext, transaction, link, p1, p2,
                                     flags, cachedelta, ifh, dfh,
                                     deltacomputer=deltacomputer)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return '', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return '', compressed

        if data[0:1] == '\0':
            return '', data
        return 'u', data

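    # Contract between compress() above and decompress() below: the return
    # value is a (header, data) pair. An empty header means the payload is
    # self-describing -- either the compressor embedded its own header
    # byte (e.g. 'x' for zlib) or the data already starts with '\0', which
    # decompress() treats as stored-as-is. The 'u' header explicitly marks
    # uncompressible data stored literally.
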
    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == 'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise RevlogError(_('revlog decompress error: %s') %
                                  stringutil.forcebytestr(e))
        # '\0' is more common than 'u' so it goes first.
        elif t == '\0':
            return data
        elif t == 'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor()
                self._decompressors[t] = compressor
            except KeyError:
                raise RevlogError(_('unknown compression type %r') % t)

        return compressor.decompress(data)

    def _isgooddeltainfo(self, deltainfo, revinfo):
        """Returns True if the given delta is good. Good means that it is within
        the disk span, disk size, and chain length bounds that we know to be
        performant."""
        if deltainfo is None:
            return False

        # - 'deltainfo.distance' is the distance from the base revision --
        #   bounding it limits the amount of I/O we need to do.
        # - 'deltainfo.compresseddeltalen' is the sum of the total size of
        #   deltas we need to apply -- bounding it limits the amount of CPU
        #   we consume.

        if self._sparserevlog:
            # As sparse-read will be used, we can consider that the distance,
            # instead of being the span of the whole chunk,
            # is the span of the largest read chunk
            base = deltainfo.base

            if base != nullrev:
                deltachain = self._deltachain(base)[0]
            else:
                deltachain = []

            chunks = _slicechunk(self, deltachain, deltainfo)
            all_span = [_segmentspan(self, revs, deltainfo) for revs in chunks]
            distance = max(all_span)
        else:
            distance = deltainfo.distance

        textlen = revinfo.textlen
        defaultmax = textlen * 4
        maxdist = self._maxdeltachainspan
        if not maxdist:
            maxdist = distance # ensure the conditional below passes
        maxdist = max(maxdist, defaultmax)
        if self._sparserevlog and maxdist < self._srmingapsize:
            # In multiple places, we are ignoring irrelevant data ranges
            # below a certain size. We also apply this tradeoff here and
            # relax the span constraint for small enough content.
            maxdist = self._srmingapsize

        # Bad delta from read span:
        #
        # If the span of data read is larger than the maximum allowed.
        if maxdist < distance:
            return False

        # Bad delta from new delta size:
        #
        # If the delta size is larger than the target text, storing the
        # delta will be inefficient.
        if textlen < deltainfo.deltalen:
            return False

        # Bad delta from cumulated payload size:
        #
        # If the sum of the deltas gets larger than K * target text length.
        if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
            return False

        # Bad delta from chain length:
        #
        # If the number of deltas in the chain gets too high.
        if self._maxchainlen and self._maxchainlen < deltainfo.chainlen:
            return False

        return True

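    # Reading the span bound in _isgooddeltainfo above: when
    # _maxdeltachainspan is unset (falsy), maxdist is seeded with the
    # measured distance itself, so the ``maxdist < distance`` check can
    # never reject. Otherwise (made-up numbers), with textlen = 1000 and a
    # configured span of 2000, maxdist = max(2000, 4 * 1000) = 4000, so a
    # delta is rejected on the span criterion only once its read span
    # exceeds 4000 bytes.
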
    def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
                     cachedelta, ifh, dfh, alwayscache=False,
                     deltacomputer=None):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == nullid:
            raise RevlogError(_("%s: attempt to add null revision") %
                              (self.indexfile))
        if node == wdirid or node in wdirfilenodeids:
            raise RevlogError(_("%s: attempt to add wdir revision") %
                              (self.indexfile))

        if self._inline:
            fh = ifh
        else:
            fh = dfh

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1
        offset = self.end(prev)
        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            deltacomputer = _deltacomputer(self)

        revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)

        # no delta for flag processor revision (see "candelta" for why)
        # not calling candelta since only one revision needs test, also to
        # avoid overhead fetching flags again.
        if flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
            deltainfo = None
        else:
            deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        if deltainfo is not None:
            base = deltainfo.base
            chainbase = deltainfo.chainbase
            data = deltainfo.data
            l = deltainfo.deltalen
        else:
            rawtext = deltacomputer.buildtext(revinfo, fh)
            data = self.compress(rawtext)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.append(e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        self._writeentry(transaction, ifh, dfh, entry, data, link, offset)

        if alwayscache and rawtext is None:
            rawtext = deltacomputer._buildtext(revinfo, fh)

        if type(rawtext) == bytes: # only accept immutable objects
            self._cache = (node, curr, rawtext)
        self._chainbasecache[curr] = chainbase
        return node

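    # Layout of the index entry tuple ``e`` built in _addrevision above:
    #   (offset_type(offset, flags),  # data offset and flags packed together
    #    l,                           # length of the stored (possibly compressed) data
    #    textlen,                     # length of the full raw text
    #    base,                        # delta base revision
    #    link,                        # linkrev into the changelog
    #    p1r, p2r,                    # parent revision numbers
    #    node)                        # the node id itself
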
    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self._enforceinlinesize(transaction, ifh)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self._indexfp("a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self._datafp("a+")
        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()
        try:
            deltacomputer = _deltacomputer(self)
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, linknode, deltabase, delta, flags = data
                link = linkmapper(linknode)
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(node, None, transaction, link,
                                  p1, p2, flags, (baserev, delta),
                                  ifh, dfh,
                                  alwayscache=bool(addrevisioncb),
                                  deltacomputer=deltacomputer)

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self._datafp("a+")
                    ifh = self._indexfp("a+")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return nodes

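    # Shape of each item consumed by addgroup above: ``data`` unpacks to
    # (node, p1, p2, linknode, deltabase, delta, flags), i.e. the new
    # node, its two parents, the changelog node it is linked to, the node
    # the delta applies against, the delta payload itself, and the
    # revision flags (defaulted to REVIDX_DEFAULT_FLAGS when falsy).
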
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

2739 def _peek_iscensored(self, baserev, delta, flush):
2751 def _peek_iscensored(self, baserev, delta, flush):
2740 """Quickly check if a delta produces a censored revision."""
2752 """Quickly check if a delta produces a censored revision."""
2741 if not self._censorable:
2753 if not self._censorable:
2742 return False
2754 return False
2743
2755
2744 # Fragile heuristic: unless new file meta keys are added alphabetically
2756 # Fragile heuristic: unless new file meta keys are added alphabetically
2745 # preceding "censored", all censored revisions are prefixed by
2757 # preceding "censored", all censored revisions are prefixed by
2746 # "\1\ncensored:". A delta producing such a censored revision must be a
2758 # "\1\ncensored:". A delta producing such a censored revision must be a
2747 # full-replacement delta, so we inspect the first and only patch in the
2759 # full-replacement delta, so we inspect the first and only patch in the
2748 # delta for this prefix.
2760 # delta for this prefix.
2749 hlen = struct.calcsize(">lll")
2761 hlen = struct.calcsize(">lll")
2750 if len(delta) <= hlen:
2762 if len(delta) <= hlen:
2751 return False
2763 return False
2752
2764
2753 oldlen = self.rawsize(baserev)
2765 oldlen = self.rawsize(baserev)
2754 newlen = len(delta) - hlen
2766 newlen = len(delta) - hlen
2755 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2767 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2756 return False
2768 return False
2757
2769
2758 add = "\1\ncensored:"
2770 add = "\1\ncensored:"
2759 addlen = len(add)
2771 addlen = len(add)
2760 return newlen >= addlen and delta[hlen:hlen + addlen] == add
2772 return newlen >= addlen and delta[hlen:hlen + addlen] == add
2761
2773
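# ---- editor's note: a standalone sketch (not part of revlog.py) of the
# full-replacement check used by _peek_iscensored() above. The local
# replacediffheader() mirrors what mdiff.replacediffheader() is assumed to
# produce: a single bdiff hunk replacing bytes [0, oldlen) with newlen new
# bytes.
import struct

def replacediffheader(oldlen, newlen):
    return struct.pack(">lll", 0, oldlen, newlen)

def peek_iscensored(oldlen, delta):
    """Return True if `delta` is a full replacement whose new text
    carries the censored-metadata prefix."""
    hlen = struct.calcsize(">lll")
    if len(delta) <= hlen:
        return False
    if delta[:hlen] != replacediffheader(oldlen, len(delta) - hlen):
        return False
    return delta[hlen:].startswith(b"\x01\ncensored:")

newtext = b"\x01\ncensored: tombstone"
censored = replacediffheader(5, len(newtext)) + newtext
assert peek_iscensored(5, censored)
assert not peek_iscensored(5, b"garbage")
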
    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.headrevs():
            headlinkrev = self.linkrev(head)
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

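# ---- editor's note: a self-contained restatement (not part of revlog.py)
# of the walk in getstrippoint(), run on a hypothetical four-revision
# graph. Rev 2 links to changelog rev 3 while its sibling rev 3 links to
# changelog rev 2, so stripping minlink=3 must also remove rev 3 -- it is
# reported as a broken linkrev.
nullrev = -1

class toylog(object):
    # (p1, p2, linkrev) per revision; rev 1 has two children, 2 and 3
    entries = [(nullrev, nullrev, 0),
               (0, nullrev, 1),
               (1, nullrev, 3),
               (1, nullrev, 2)]

    def __len__(self):
        return len(self.entries)

    def headrevs(self):
        parents = {p for e in self.entries for p in e[:2]}
        return [r for r in range(len(self.entries)) if r not in parents]

    def linkrev(self, rev):
        return self.entries[rev][2]

    def parentrevs(self, rev):
        return self.entries[rev][:2]

def getstrippoint(rl, minlink):
    brokenrevs = set()
    strippoint = len(rl)
    heads = {}
    futurelargelinkrevs = set()
    for head in rl.headrevs():
        headlinkrev = rl.linkrev(head)
        heads[head] = headlinkrev
        if headlinkrev >= minlink:
            futurelargelinkrevs.add(headlinkrev)
    while futurelargelinkrevs:
        strippoint -= 1
        linkrev = heads.pop(strippoint)
        if linkrev < minlink:
            brokenrevs.add(strippoint)
        else:
            futurelargelinkrevs.remove(linkrev)
        for p in rl.parentrevs(strippoint):
            if p != nullrev:
                plinkrev = rl.linkrev(p)
                heads[p] = plinkrev
                if plinkrev >= minlink:
                    futurelargelinkrevs.add(plinkrev)
    return strippoint, brokenrevs

print(getstrippoint(toylog(), 3))   # (2, {3})
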
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in pycompat.xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]
        self._nodepos = None

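# ---- editor's note: hypothetical numbers (not part of revlog.py) showing
# where strip() truncates. Index entries are 64 bytes in revlogv1
# (self._io.size above); assume the first stripped revision starts at
# data offset 9000.
INDEX_ENTRY_SIZE = 64

def truncation_offsets(rev, datastart, inline):
    """Return (datafile offset, indexfile offset) strip() would use."""
    if not inline:
        # separate .d file: cut the data at the start of `rev` and the
        # index after `rev` fixed-size entries
        return datastart, rev * INDEX_ENTRY_SIZE
    # inline revlog: revision data lives in the index file, so the cut
    # point is `rev` entries plus all preceding data bytes
    return None, datastart + rev * INDEX_ENTRY_SIZE

print(truncation_offsets(120, 9000, inline=False))  # (9000, 7680)
print(truncation_offsets(120, 9000, inline=True))   # (None, 16680)
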
    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, 2)
                actual = f.tell()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

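# ---- editor's note: how a caller might consume checksize()'s (dd, di)
# pair -- nonzero dd means the data file has unexpected trailing bytes,
# nonzero di means the index does. A sketch in the spirit of the checks
# `hg verify` performs; `rl` stands for any revlog instance.
def sizesok(name, rl):
    dd, di = rl.checksize()
    if dd:
        print("%s: data length off by %d bytes" % (name, dd))
    if di:
        print("%s: index contains %d extra bytes" % (name, di))
    return not dd and not di
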
    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    DELTAREUSEALWAYS = 'always'
    DELTAREUSESAMEREVS = 'samerevs'
    DELTAREUSENEVER = 'never'

    DELTAREUSEFULLADD = 'fulladd'

    DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}

    def clone(self, tr, destrevlog, addrevisioncb=None,
              deltareuse=DELTAREUSESAMEREVS, deltabothparents=None):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
           Deltas will always be reused (if possible), even if the destination
           revlog would not select the same revisions for the delta. This is
           the fastest mode of operation.
        DELTAREUSESAMEREVS
           Deltas will be reused if the destination revlog would pick the same
           revisions for the delta. This mode strikes a balance between speed
           and optimization.
        DELTAREUSENEVER
           Deltas will never be reused. This is the slowest mode of execution.
           This mode can be used to recompute deltas (e.g. if the diff/delta
           algorithm changes).

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``deltabothparents`` argument
        controls whether to compute deltas against both parents for merges.
        When unspecified, the destination revlog's current setting is kept.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

        if len(destrevlog):
            raise ValueError(_('destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_('source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_('destination revlog has filtered revisions'))

        # lazydeltabase controls whether to reuse a cached delta, if possible.
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False

            destrevlog._deltabothparents = deltabothparents or oldamd

            populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                self.DELTAREUSESAMEREVS)

            deltacomputer = _deltacomputer(destrevlog)
            index = self.index
            for rev in self:
                entry = index[rev]

                # Some classes override linkrev to take filtered revs into
                # account. Use raw entry from index.
                flags = entry[0] & 0xffff
                linkrev = entry[4]
                p1 = index[entry[5]][7]
                p2 = index[entry[6]][7]
                node = entry[7]

                # (Possibly) reuse the delta from the revlog if allowed and
                # the revlog chunk is a delta.
                cachedelta = None
                rawtext = None
                if populatecachedelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.revision(rev, raw=True)

                if deltareuse == self.DELTAREUSEFULLADD:
                    destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
                                           cachedelta=cachedelta,
                                           node=node, flags=flags,
                                           deltacomputer=deltacomputer)
                else:
                    ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                            checkambig=False)
                    dfh = None
                    if not destrevlog._inline:
                        dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                    try:
                        destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
                                                p2, flags, cachedelta, ifh, dfh,
                                                deltacomputer=deltacomputer)
                    finally:
                        if dfh:
                            dfh.close()
                        ifh.close()

                if addrevisioncb:
                    addrevisioncb(self, rev, node)
        finally:
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd
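
# ---- editor's note: a minimal sketch of driving clone(), e.g. from an
# upgrade-style command. `tr`, `srcrevlog` and `destrevlog` are assumed to
# be supplied by the caller; `copyrevlog` and `progress` are illustrative
# names, not an actual Mercurial API.
def copyrevlog(srcrevlog, destrevlog, tr, progress=None):
    def onrevision(rl, rev, node):
        if progress is not None:
            progress(rev)

    # DELTAREUSESAMEREVS (the default) only recomputes a delta when the
    # destination would pick a different base; DELTAREUSENEVER forces
    # recomputation, e.g. after a delta algorithm change.
    srcrevlog.clone(tr, destrevlog,
                    addrevisioncb=onrevision,
                    deltareuse=srcrevlog.DELTAREUSESAMEREVS)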
@@ -1,1277 +1,1277 @@
#testcases b2-pushkey b2-binary

#if b2-pushkey
  $ cat << EOF >> $HGRCPATH
  > [devel]
  > legacy.exchange=bookmarks
  > EOF
#endif

#require serve

  $ cat << EOF >> $HGRCPATH
  > [ui]
  > logtemplate={rev}:{node|short} {desc|firstline}
  > [phases]
  > publish=False
  > [experimental]
  > evolution.createmarkers=True
  > evolution.exchange=True
  > EOF

  $ cat > $TESTTMP/hook.sh <<'EOF'
  > echo "test-hook-bookmark: $HG_BOOKMARK: $HG_OLDNODE -> $HG_NODE"
  > EOF
  $ TESTHOOK="hooks.txnclose-bookmark.test=sh $TESTTMP/hook.sh"

initialize

  $ hg init a
  $ cd a
  $ echo 'test' > test
  $ hg commit -Am'test'
  adding test

set bookmarks

  $ hg bookmark X
  $ hg bookmark Y
  $ hg bookmark Z

import bookmark by name

  $ hg init ../b
  $ cd ../b
  $ hg book Y
  $ hg book
   * Y -1:000000000000
  $ hg pull ../a --config "$TESTHOOK"
  pulling from ../a
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  adding remote bookmark X
  updating bookmark Y
  adding remote bookmark Z
  new changesets 4e3505fd9583
  test-hook-bookmark: X: -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
  test-hook-bookmark: Y: 0000000000000000000000000000000000000000 -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
  test-hook-bookmark: Z: -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
  (run 'hg update' to get a working copy)
  $ hg bookmarks
  X 0:4e3505fd9583
  * Y 0:4e3505fd9583
  Z 0:4e3505fd9583
  $ hg debugpushkey ../a namespaces
  bookmarks
  namespaces
  obsolete
  phases
  $ hg debugpushkey ../a bookmarks
  X 4e3505fd95835d721066b76e75dbb8cc554d7f77
  Y 4e3505fd95835d721066b76e75dbb8cc554d7f77
  Z 4e3505fd95835d721066b76e75dbb8cc554d7f77

delete the bookmark to re-pull it

  $ hg book -d X
  $ hg pull -B X ../a
  pulling from ../a
  no changes found
  adding remote bookmark X

finally no-op pull

  $ hg pull -B X ../a
  pulling from ../a
  no changes found
  $ hg bookmark
  X 0:4e3505fd9583
  * Y 0:4e3505fd9583
  Z 0:4e3505fd9583

export bookmark by name

  $ hg bookmark W
  $ hg bookmark foo
  $ hg bookmark foobar
  $ hg push -B W ../a
  pushing to ../a
  searching for changes
  no changes found
  exporting bookmark W
  [1]
  $ hg -R ../a bookmarks
  W -1:000000000000
  X 0:4e3505fd9583
  Y 0:4e3505fd9583
  * Z 0:4e3505fd9583

delete a remote bookmark

  $ hg book -d W

#if b2-pushkey

  $ hg push -B W ../a --config "$TESTHOOK" --debug --config devel.bundle2.debug=yes
  pushing to ../a
  query 1; heads
  searching for changes
  all remote heads known locally
  listing keys for "phases"
  checking for updated bookmarks
  listing keys for "bookmarks"
  no changes found
  bundle2-output-bundle: "HG20", 4 parts total
  bundle2-output: start emission of HG20 stream
  bundle2-output: bundle parameter:
  bundle2-output: start of parts
  bundle2-output: bundle part: "replycaps"
  bundle2-output-part: "replycaps" 222 bytes payload
  bundle2-output: part 0: "REPLYCAPS"
  bundle2-output: header chunk size: 16
  bundle2-output: payload chunk size: 222
  bundle2-output: closing payload chunk
  bundle2-output: bundle part: "check:bookmarks"
  bundle2-output-part: "check:bookmarks" 23 bytes payload
  bundle2-output: part 1: "CHECK:BOOKMARKS"
  bundle2-output: header chunk size: 22
  bundle2-output: payload chunk size: 23
  bundle2-output: closing payload chunk
  bundle2-output: bundle part: "check:phases"
  bundle2-output-part: "check:phases" 24 bytes payload
  bundle2-output: part 2: "CHECK:PHASES"
  bundle2-output: header chunk size: 19
  bundle2-output: payload chunk size: 24
  bundle2-output: closing payload chunk
  bundle2-output: bundle part: "pushkey"
  bundle2-output-part: "pushkey" (params: 4 mandatory) empty payload
  bundle2-output: part 3: "PUSHKEY"
  bundle2-output: header chunk size: 90
  bundle2-output: closing payload chunk
  bundle2-output: end of bundle
  bundle2-input: start processing of HG20 stream
  bundle2-input: reading bundle2 stream parameters
  bundle2-input-bundle: with-transaction
  bundle2-input: start extraction of bundle2 parts
  bundle2-input: part header size: 16
  bundle2-input: part type: "REPLYCAPS"
  bundle2-input: part id: "0"
  bundle2-input: part parameters: 0
  bundle2-input: found a handler for part replycaps
  bundle2-input-part: "replycaps" supported
  bundle2-input: payload chunk size: 222
  bundle2-input: payload chunk size: 0
  bundle2-input-part: total payload size 222
  bundle2-input: part header size: 22
  bundle2-input: part type: "CHECK:BOOKMARKS"
  bundle2-input: part id: "1"
  bundle2-input: part parameters: 0
  bundle2-input: found a handler for part check:bookmarks
  bundle2-input-part: "check:bookmarks" supported
  bundle2-input: payload chunk size: 23
  bundle2-input: payload chunk size: 0
  bundle2-input-part: total payload size 23
  bundle2-input: part header size: 19
  bundle2-input: part type: "CHECK:PHASES"
  bundle2-input: part id: "2"
  bundle2-input: part parameters: 0
  bundle2-input: found a handler for part check:phases
  bundle2-input-part: "check:phases" supported
  bundle2-input: payload chunk size: 24
  bundle2-input: payload chunk size: 0
  bundle2-input-part: total payload size 24
  bundle2-input: part header size: 90
  bundle2-input: part type: "PUSHKEY"
  bundle2-input: part id: "3"
  bundle2-input: part parameters: 4
  bundle2-input: found a handler for part pushkey
  bundle2-input-part: "pushkey" (params: 4 mandatory) supported
  pushing key for "bookmarks:W"
  bundle2-input: payload chunk size: 0
  bundle2-input: part header size: 0
  bundle2-input: end of bundle2 stream
  bundle2-input-bundle: 3 parts total
  running hook txnclose-bookmark.test: sh $TESTTMP/hook.sh
  test-hook-bookmark: W: 0000000000000000000000000000000000000000 ->
  bundle2-output-bundle: "HG20", 1 parts total
  bundle2-output: start emission of HG20 stream
  bundle2-output: bundle parameter:
  bundle2-output: start of parts
  bundle2-output: bundle part: "reply:pushkey"
  bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
  bundle2-output: part 0: "REPLY:PUSHKEY"
  bundle2-output: header chunk size: 43
  bundle2-output: closing payload chunk
  bundle2-output: end of bundle
  bundle2-input: start processing of HG20 stream
  bundle2-input: reading bundle2 stream parameters
  bundle2-input-bundle: no-transaction
  bundle2-input: start extraction of bundle2 parts
  bundle2-input: part header size: 43
  bundle2-input: part type: "REPLY:PUSHKEY"
  bundle2-input: part id: "0"
  bundle2-input: part parameters: 2
  bundle2-input: found a handler for part reply:pushkey
  bundle2-input-part: "reply:pushkey" (params: 0 advisory) supported
  bundle2-input: payload chunk size: 0
  bundle2-input: part header size: 0
  bundle2-input: end of bundle2 stream
  bundle2-input-bundle: 0 parts total
  deleting remote bookmark W
  listing keys for "phases"
  [1]

#endif
#if b2-binary

  $ hg push -B W ../a --config "$TESTHOOK" --debug --config devel.bundle2.debug=yes
  pushing to ../a
  query 1; heads
  searching for changes
  all remote heads known locally
  listing keys for "phases"
  checking for updated bookmarks
  listing keys for "bookmarks"
  no changes found
  bundle2-output-bundle: "HG20", 4 parts total
  bundle2-output: start emission of HG20 stream
  bundle2-output: bundle parameter:
  bundle2-output: start of parts
  bundle2-output: bundle part: "replycaps"
  bundle2-output-part: "replycaps" 222 bytes payload
  bundle2-output: part 0: "REPLYCAPS"
  bundle2-output: header chunk size: 16
  bundle2-output: payload chunk size: 222
  bundle2-output: closing payload chunk
  bundle2-output: bundle part: "check:bookmarks"
  bundle2-output-part: "check:bookmarks" 23 bytes payload
  bundle2-output: part 1: "CHECK:BOOKMARKS"
  bundle2-output: header chunk size: 22
  bundle2-output: payload chunk size: 23
  bundle2-output: closing payload chunk
  bundle2-output: bundle part: "check:phases"
  bundle2-output-part: "check:phases" 24 bytes payload
  bundle2-output: part 2: "CHECK:PHASES"
  bundle2-output: header chunk size: 19
  bundle2-output: payload chunk size: 24
  bundle2-output: closing payload chunk
  bundle2-output: bundle part: "bookmarks"
  bundle2-output-part: "bookmarks" 23 bytes payload
  bundle2-output: part 3: "BOOKMARKS"
  bundle2-output: header chunk size: 16
  bundle2-output: payload chunk size: 23
  bundle2-output: closing payload chunk
  bundle2-output: end of bundle
  bundle2-input: start processing of HG20 stream
  bundle2-input: reading bundle2 stream parameters
  bundle2-input-bundle: with-transaction
  bundle2-input: start extraction of bundle2 parts
  bundle2-input: part header size: 16
  bundle2-input: part type: "REPLYCAPS"
  bundle2-input: part id: "0"
  bundle2-input: part parameters: 0
  bundle2-input: found a handler for part replycaps
  bundle2-input-part: "replycaps" supported
  bundle2-input: payload chunk size: 222
  bundle2-input: payload chunk size: 0
  bundle2-input-part: total payload size 222
  bundle2-input: part header size: 22
  bundle2-input: part type: "CHECK:BOOKMARKS"
  bundle2-input: part id: "1"
  bundle2-input: part parameters: 0
  bundle2-input: found a handler for part check:bookmarks
  bundle2-input-part: "check:bookmarks" supported
  bundle2-input: payload chunk size: 23
  bundle2-input: payload chunk size: 0
  bundle2-input-part: total payload size 23
  bundle2-input: part header size: 19
  bundle2-input: part type: "CHECK:PHASES"
  bundle2-input: part id: "2"
  bundle2-input: part parameters: 0
  bundle2-input: found a handler for part check:phases
  bundle2-input-part: "check:phases" supported
  bundle2-input: payload chunk size: 24
  bundle2-input: payload chunk size: 0
  bundle2-input-part: total payload size 24
  bundle2-input: part header size: 16
  bundle2-input: part type: "BOOKMARKS"
  bundle2-input: part id: "3"
  bundle2-input: part parameters: 0
  bundle2-input: found a handler for part bookmarks
  bundle2-input-part: "bookmarks" supported
  bundle2-input: payload chunk size: 23
  bundle2-input: payload chunk size: 0
  bundle2-input-part: total payload size 23
  bundle2-input: part header size: 0
  bundle2-input: end of bundle2 stream
  bundle2-input-bundle: 3 parts total
  running hook txnclose-bookmark.test: sh $TESTTMP/hook.sh
  test-hook-bookmark: W: 0000000000000000000000000000000000000000 ->
  bundle2-output-bundle: "HG20", 0 parts total
  bundle2-output: start emission of HG20 stream
  bundle2-output: bundle parameter:
  bundle2-output: start of parts
  bundle2-output: end of bundle
  bundle2-input: start processing of HG20 stream
  bundle2-input: reading bundle2 stream parameters
  bundle2-input-bundle: no-transaction
  bundle2-input: start extraction of bundle2 parts
  bundle2-input: part header size: 0
  bundle2-input: end of bundle2 stream
  bundle2-input-bundle: 0 parts total
  deleting remote bookmark W
  listing keys for "phases"
  [1]

#endif

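The bundle2 traffic in the two blocks above follows a simple framing that the
debug lines expose directly: every part opens with a 32-bit header size
("part header size"), its payload is a sequence of 32-bit length-prefixed
chunks ("payload chunk size"), a zero-length chunk closes the payload, and a
zero-length header closes the stream. Below is a simplified reader of that
framing; it skips the leading magic and stream parameters and does not decode
part headers, so it is an illustration only -- mercurial/bundle2.py is the
authoritative implementation.

import struct

def iterparts(fh):
    """Yield (rawheader, payload) for each part of an HG20 part stream."""
    while True:
        headersize = struct.unpack(">i", fh.read(4))[0]
        if headersize == 0:          # "part header size: 0": end of stream
            return
        rawheader = fh.read(headersize)
        chunks = []
        while True:
            chunksize = struct.unpack(">i", fh.read(4))[0]
            if chunksize == 0:       # "payload chunk size: 0": end of part
                break
            chunks.append(fh.read(chunksize))
        yield rawheader, b"".join(chunks)
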
export the active bookmark

  $ hg bookmark V
  $ hg push -B . ../a
  pushing to ../a
  searching for changes
  no changes found
  exporting bookmark V
  [1]

exporting the active bookmark with 'push -B .' demands that one of the
bookmarks be active

  $ hg update -r default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (leaving bookmark V)
  $ hg push -B . ../a
  abort: no active bookmark
  [255]
  $ hg update -r V
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  (activating bookmark V)

delete the bookmark

  $ hg book -d V
  $ hg push -B V ../a
  pushing to ../a
  searching for changes
  no changes found
  deleting remote bookmark V
  [1]
  $ hg up foobar
  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (activating bookmark foobar)

push/pull name that doesn't exist

  $ hg push -B badname ../a
  pushing to ../a
  searching for changes
  bookmark badname does not exist on the local or remote repository!
  no changes found
  [2]
  $ hg pull -B anotherbadname ../a
  pulling from ../a
  abort: remote bookmark anotherbadname not found!
  [255]

divergent bookmarks

  $ cd ../a
  $ echo c1 > f1
  $ hg ci -Am1
  adding f1
  $ hg book -f @
  $ hg book -f X
  $ hg book
  @ 1:0d2164f0ce0d
  * X 1:0d2164f0ce0d
  Y 0:4e3505fd9583
  Z 1:0d2164f0ce0d

  $ cd ../b
  $ hg up
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  updating bookmark foobar
  $ echo c2 > f2
  $ hg ci -Am2
  adding f2
  $ hg book -if @
  $ hg book -if X
  $ hg book
  @ 1:9b140be10808
  X 1:9b140be10808
  Y 0:4e3505fd9583
  Z 0:4e3505fd9583
  foo -1:000000000000
  * foobar 1:9b140be10808

  $ hg pull --config paths.foo=../a foo --config "$TESTHOOK"
  pulling from $TESTTMP/a
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
  divergent bookmark @ stored as @foo
  divergent bookmark X stored as X@foo
  updating bookmark Z
  new changesets 0d2164f0ce0d
  test-hook-bookmark: @foo: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
  test-hook-bookmark: X@foo: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
  test-hook-bookmark: Z: 4e3505fd95835d721066b76e75dbb8cc554d7f77 -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
  (run 'hg heads' to see heads, 'hg merge' to merge)
  $ hg book
  @ 1:9b140be10808
  @foo 2:0d2164f0ce0d
  X 1:9b140be10808
  X@foo 2:0d2164f0ce0d
  Y 0:4e3505fd9583
  Z 2:0d2164f0ce0d
  foo -1:000000000000
  * foobar 1:9b140be10808

(test that an excess of divergent bookmarks is handled gracefully)

  $ $PYTHON $TESTDIR/seq.py 1 100 | while read i; do hg bookmarks -r 000000000000 "X@${i}"; done
  $ hg pull ../a
  pulling from ../a
  searching for changes
  no changes found
  warning: failed to assign numbered name to divergent bookmark X
  divergent bookmark @ stored as @1
  $ hg bookmarks | grep '^ X' | grep -v ':000000000000'
  X 1:9b140be10808
  X@foo 2:0d2164f0ce0d

(test that remotely diverged bookmarks are reused if they aren't changed)

  $ hg bookmarks | grep '^ @'
  @ 1:9b140be10808
  @1 2:0d2164f0ce0d
  @foo 2:0d2164f0ce0d
  $ hg pull ../a
  pulling from ../a
  searching for changes
  no changes found
  warning: failed to assign numbered name to divergent bookmark X
  divergent bookmark @ stored as @1
  $ hg bookmarks | grep '^ @'
  @ 1:9b140be10808
  @1 2:0d2164f0ce0d
  @foo 2:0d2164f0ce0d

  $ $PYTHON $TESTDIR/seq.py 1 100 | while read i; do hg bookmarks -d "X@${i}"; done
  $ hg bookmarks -d "@1"

  $ hg push -f ../a
  pushing to ../a
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
  $ hg -R ../a book
  @ 1:0d2164f0ce0d
  * X 1:0d2164f0ce0d
  Y 0:4e3505fd9583
  Z 1:0d2164f0ce0d

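The `@foo`, `@1` and `X@1`..`X@100` names above come from hg's
divergent-bookmark naming: a conflicting bookmark pulled through a configured
path alias is suffixed with that alias, and otherwise a numbered suffix from
1 to 100 is tried before hg gives up with the "failed to assign numbered
name" warning. A simplified sketch of that selection (the real logic lives in
mercurial/bookmarks.py; this is an approximation, not the actual code):

def divergentname(existing, mark, remotealias=None):
    if remotealias:
        candidate = '%s@%s' % (mark, remotealias)
        if candidate not in existing:
            return candidate
    for n in range(1, 101):
        candidate = '%s@%d' % (mark, n)
        if candidate not in existing:
            return candidate
    return None   # caller warns: failed to assign numbered name

marks = {'X@%d' % n for n in range(1, 101)}
print(divergentname(marks, 'X'))           # None -> warning above
print(divergentname(set(), '@', 'foo'))    # '@foo'
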
explicit pull should overwrite the local version (issue4439)

  $ hg update -r X
  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (activating bookmark X)
  $ hg pull --config paths.foo=../a foo -B . --config "$TESTHOOK"
  pulling from $TESTTMP/a
  no changes found
  divergent bookmark @ stored as @foo
  importing bookmark X
  test-hook-bookmark: @foo: 0d2164f0ce0d8f1d6f94351eba04b794909be66c -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
  test-hook-bookmark: X: 9b140be1080824d768c5a4691a564088eede71f9 -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c

reinstall state for further testing:

  $ hg book -fr 9b140be10808 X

revsets should not ignore divergent bookmarks

  $ hg bookmark -fr 1 Z
  $ hg log -r 'bookmark()' --template '{rev}:{node|short} {bookmarks}\n'
  0:4e3505fd9583 Y
  1:9b140be10808 @ X Z foobar
  2:0d2164f0ce0d @foo X@foo
  $ hg log -r 'bookmark("X@foo")' --template '{rev}:{node|short} {bookmarks}\n'
  2:0d2164f0ce0d @foo X@foo
  $ hg log -r 'bookmark("re:X@foo")' --template '{rev}:{node|short} {bookmarks}\n'
  2:0d2164f0ce0d @foo X@foo

update a remote bookmark from a non-head to a head

  $ hg up -q Y
  $ echo c3 > f2
  $ hg ci -Am3
  adding f2
  created new head
  $ hg push ../a --config "$TESTHOOK"
  pushing to ../a
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
  test-hook-bookmark: Y: 4e3505fd95835d721066b76e75dbb8cc554d7f77 -> f6fc62dde3c0771e29704af56ba4d8af77abcc2f
  updating bookmark Y
  $ hg -R ../a book
  @ 1:0d2164f0ce0d
  * X 1:0d2164f0ce0d
  Y 3:f6fc62dde3c0
  Z 1:0d2164f0ce0d

533 update a bookmark in the middle of a client pulling changes
533 update a bookmark in the middle of a client pulling changes
534
534
535 $ cd ..
535 $ cd ..
536 $ hg clone -q a pull-race
536 $ hg clone -q a pull-race
537
537
538 We want to use http because it is stateless and therefore more susceptible to
538 We want to use http because it is stateless and therefore more susceptible to
539 race conditions
539 race conditions
540
540
541 $ hg serve -R pull-race -p $HGPORT -d --pid-file=pull-race.pid -E main-error.log
541 $ hg serve -R pull-race -p $HGPORT -d --pid-file=pull-race.pid -E main-error.log
542 $ cat pull-race.pid >> $DAEMON_PIDS
542 $ cat pull-race.pid >> $DAEMON_PIDS
543
543
544 $ cat <<EOF > $TESTTMP/out_makecommit.sh
544 $ cat <<EOF > $TESTTMP/out_makecommit.sh
545 > #!/bin/sh
545 > #!/bin/sh
546 > hg ci -Am5
546 > hg ci -Am5
547 > echo committed in pull-race
547 > echo committed in pull-race
548 > EOF
548 > EOF
549
549
550 $ hg clone -q http://localhost:$HGPORT/ pull-race2 --config "$TESTHOOK"
550 $ hg clone -q http://localhost:$HGPORT/ pull-race2 --config "$TESTHOOK"
551 test-hook-bookmark: @: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
551 test-hook-bookmark: @: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
552 test-hook-bookmark: X: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
552 test-hook-bookmark: X: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
553 test-hook-bookmark: Y: -> f6fc62dde3c0771e29704af56ba4d8af77abcc2f
553 test-hook-bookmark: Y: -> f6fc62dde3c0771e29704af56ba4d8af77abcc2f
554 test-hook-bookmark: Z: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
554 test-hook-bookmark: Z: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
555 $ cd pull-race
555 $ cd pull-race
556 $ hg up -q Y
556 $ hg up -q Y
557 $ echo c4 > f2
557 $ echo c4 > f2
558 $ hg ci -Am4
558 $ hg ci -Am4
559 $ echo c5 > f3
559 $ echo c5 > f3
560 $ cat <<EOF > .hg/hgrc
560 $ cat <<EOF > .hg/hgrc
561 > [hooks]
561 > [hooks]
562 > outgoing.makecommit = sh $TESTTMP/out_makecommit.sh
562 > outgoing.makecommit = sh $TESTTMP/out_makecommit.sh
563 > EOF
563 > EOF
564
564
565 (new config needs a server restart)
565 (new config needs a server restart)
566
566
567 $ cd ..
567 $ cd ..
568 $ killdaemons.py
568 $ killdaemons.py
569 $ hg serve -R pull-race -p $HGPORT -d --pid-file=pull-race.pid -E main-error.log
569 $ hg serve -R pull-race -p $HGPORT -d --pid-file=pull-race.pid -E main-error.log
570 $ cat pull-race.pid >> $DAEMON_PIDS
570 $ cat pull-race.pid >> $DAEMON_PIDS
571 $ cd pull-race2
571 $ cd pull-race2
572 $ hg -R $TESTTMP/pull-race book
572 $ hg -R $TESTTMP/pull-race book
573 @ 1:0d2164f0ce0d
573 @ 1:0d2164f0ce0d
574 X 1:0d2164f0ce0d
574 X 1:0d2164f0ce0d
575 * Y 4:b0a5eff05604
575 * Y 4:b0a5eff05604
576 Z 1:0d2164f0ce0d
576 Z 1:0d2164f0ce0d
577 $ hg pull
577 $ hg pull
578 pulling from http://localhost:$HGPORT/
578 pulling from http://localhost:$HGPORT/
579 searching for changes
579 searching for changes
580 adding changesets
580 adding changesets
581 adding manifests
581 adding manifests
582 adding file changes
582 adding file changes
583 added 1 changesets with 1 changes to 1 files
583 added 1 changesets with 1 changes to 1 files
584 updating bookmark Y
584 updating bookmark Y
585 new changesets b0a5eff05604
585 new changesets b0a5eff05604
586 (run 'hg update' to get a working copy)
586 (run 'hg update' to get a working copy)
587 $ hg book
587 $ hg book
588 * @ 1:0d2164f0ce0d
588 * @ 1:0d2164f0ce0d
589 X 1:0d2164f0ce0d
589 X 1:0d2164f0ce0d
590 Y 4:b0a5eff05604
590 Y 4:b0a5eff05604
591 Z 1:0d2164f0ce0d
591 Z 1:0d2164f0ce0d
592
592
593 Update a bookmark right after the initial lookup -B (issue4689)
593 Update a bookmark right after the initial lookup -B (issue4689)
594
594
595 $ echo c6 > ../pull-race/f3 # to be committed during the race
595 $ echo c6 > ../pull-race/f3 # to be committed during the race
596 $ cat <<EOF > $TESTTMP/listkeys_makecommit.sh
596 $ cat <<EOF > $TESTTMP/listkeys_makecommit.sh
597 > #!/bin/sh
597 > #!/bin/sh
598 > if hg st | grep -q M; then
598 > if hg st | grep -q M; then
599 > hg commit -m race
599 > hg commit -m race
600 > echo committed in pull-race
600 > echo committed in pull-race
601 > else
601 > else
602 > exit 0
602 > exit 0
603 > fi
603 > fi
604 > EOF
604 > EOF
605 $ cat <<EOF > ../pull-race/.hg/hgrc
605 $ cat <<EOF > ../pull-race/.hg/hgrc
606 > [hooks]
606 > [hooks]
607 > # If anything to commit, commit it right after the first key listing used
607 > # If anything to commit, commit it right after the first key listing used
608 > # during lookup. This makes the commit appear before the actual getbundle
608 > # during lookup. This makes the commit appear before the actual getbundle
609 > # call.
609 > # call.
610 > listkeys.makecommit= sh $TESTTMP/listkeys_makecommit.sh
610 > listkeys.makecommit= sh $TESTTMP/listkeys_makecommit.sh
611 > EOF
611 > EOF
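(for illustration, not part of the recorded run: the ordering this hook
exploits, lookup and listkeys going out before getbundle, can be observed
by adding --debug to a pull, e.g.

  hg pull -B . --debug

which should show the individual wire protocol commands being sent)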
612
612
613 (new config needs a server restart)
613 (new config needs a server restart)
614
614
615 $ killdaemons.py
615 $ killdaemons.py
616 $ hg serve -R ../pull-race -p $HGPORT -d --pid-file=../pull-race.pid -E main-error.log
616 $ hg serve -R ../pull-race -p $HGPORT -d --pid-file=../pull-race.pid -E main-error.log
617 $ cat ../pull-race.pid >> $DAEMON_PIDS
617 $ cat ../pull-race.pid >> $DAEMON_PIDS
618
618
619 $ hg -R $TESTTMP/pull-race book
619 $ hg -R $TESTTMP/pull-race book
620 @ 1:0d2164f0ce0d
620 @ 1:0d2164f0ce0d
621 X 1:0d2164f0ce0d
621 X 1:0d2164f0ce0d
622 * Y 5:35d1ef0a8d1b
622 * Y 5:35d1ef0a8d1b
623 Z 1:0d2164f0ce0d
623 Z 1:0d2164f0ce0d
624 $ hg update -r Y
624 $ hg update -r Y
625 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
625 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
626 (activating bookmark Y)
626 (activating bookmark Y)
627 $ hg pull -B .
627 $ hg pull -B .
628 pulling from http://localhost:$HGPORT/
628 pulling from http://localhost:$HGPORT/
629 searching for changes
629 searching for changes
630 adding changesets
630 adding changesets
631 adding manifests
631 adding manifests
632 adding file changes
632 adding file changes
633 added 1 changesets with 1 changes to 1 files
633 added 1 changesets with 1 changes to 1 files
634 updating bookmark Y
634 updating bookmark Y
635 new changesets 35d1ef0a8d1b
635 new changesets 35d1ef0a8d1b
636 (run 'hg update' to get a working copy)
636 (run 'hg update' to get a working copy)
637 $ hg book
637 $ hg book
638 @ 1:0d2164f0ce0d
638 @ 1:0d2164f0ce0d
639 X 1:0d2164f0ce0d
639 X 1:0d2164f0ce0d
640 * Y 5:35d1ef0a8d1b
640 * Y 5:35d1ef0a8d1b
641 Z 1:0d2164f0ce0d
641 Z 1:0d2164f0ce0d
642
642
643 (done with this section of the test)
643 (done with this section of the test)
644
644
645 $ killdaemons.py
645 $ killdaemons.py
646 $ cd ../b
646 $ cd ../b
647
647
648 diverging a remote bookmark fails
648 diverging a remote bookmark fails
649
649
650 $ hg up -q 4e3505fd9583
650 $ hg up -q 4e3505fd9583
651 $ echo c4 > f2
651 $ echo c4 > f2
652 $ hg ci -Am4
652 $ hg ci -Am4
653 adding f2
653 adding f2
654 created new head
654 created new head
655 $ echo c5 > f2
655 $ echo c5 > f2
656 $ hg ci -Am5
656 $ hg ci -Am5
657 $ hg log -G
657 $ hg log -G
658 @ 5:c922c0139ca0 5
658 @ 5:c922c0139ca0 5
659 |
659 |
660 o 4:4efff6d98829 4
660 o 4:4efff6d98829 4
661 |
661 |
662 | o 3:f6fc62dde3c0 3
662 | o 3:f6fc62dde3c0 3
663 |/
663 |/
664 | o 2:0d2164f0ce0d 1
664 | o 2:0d2164f0ce0d 1
665 |/
665 |/
666 | o 1:9b140be10808 2
666 | o 1:9b140be10808 2
667 |/
667 |/
668 o 0:4e3505fd9583 test
668 o 0:4e3505fd9583 test
669
669
670
670
671 $ hg book -f Y
671 $ hg book -f Y
672
672
673 $ cat <<EOF > ../a/.hg/hgrc
673 $ cat <<EOF > ../a/.hg/hgrc
674 > [web]
674 > [web]
675 > push_ssl = false
675 > push_ssl = false
676 > allow_push = *
676 > allow_push = *
677 > EOF
677 > EOF
678
678
679 $ hg serve -R ../a -p $HGPORT2 -d --pid-file=../hg2.pid
679 $ hg serve -R ../a -p $HGPORT2 -d --pid-file=../hg2.pid
680 $ cat ../hg2.pid >> $DAEMON_PIDS
680 $ cat ../hg2.pid >> $DAEMON_PIDS
681
681
682 $ hg push http://localhost:$HGPORT2/
682 $ hg push http://localhost:$HGPORT2/
683 pushing to http://localhost:$HGPORT2/
683 pushing to http://localhost:$HGPORT2/
684 searching for changes
684 searching for changes
685 abort: push creates new remote head c922c0139ca0 with bookmark 'Y'!
685 abort: push creates new remote head c922c0139ca0 with bookmark 'Y'!
686 (merge or see 'hg help push' for details about pushing new heads)
686 (merge or see 'hg help push' for details about pushing new heads)
687 [255]
687 [255]
688 $ hg -R ../a book
688 $ hg -R ../a book
689 @ 1:0d2164f0ce0d
689 @ 1:0d2164f0ce0d
690 * X 1:0d2164f0ce0d
690 * X 1:0d2164f0ce0d
691 Y 3:f6fc62dde3c0
691 Y 3:f6fc62dde3c0
692 Z 1:0d2164f0ce0d
692 Z 1:0d2164f0ce0d
693
693
694
694
695 Unrelated marker does not alter the decision
695 Unrelated marker does not alter the decision
696
696
697 $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
697 $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
698 $ hg push http://localhost:$HGPORT2/
698 $ hg push http://localhost:$HGPORT2/
699 pushing to http://localhost:$HGPORT2/
699 pushing to http://localhost:$HGPORT2/
700 searching for changes
700 searching for changes
701 abort: push creates new remote head c922c0139ca0 with bookmark 'Y'!
701 abort: push creates new remote head c922c0139ca0 with bookmark 'Y'!
702 (merge or see 'hg help push' for details about pushing new heads)
702 (merge or see 'hg help push' for details about pushing new heads)
703 [255]
703 [255]
704 $ hg -R ../a book
704 $ hg -R ../a book
705 @ 1:0d2164f0ce0d
705 @ 1:0d2164f0ce0d
706 * X 1:0d2164f0ce0d
706 * X 1:0d2164f0ce0d
707 Y 3:f6fc62dde3c0
707 Y 3:f6fc62dde3c0
708 Z 1:0d2164f0ce0d
708 Z 1:0d2164f0ce0d
709
709
710 Update to a successor works
710 Update to a successor works
711
711
712 $ hg id --debug -r 3
712 $ hg id --debug -r 3
713 f6fc62dde3c0771e29704af56ba4d8af77abcc2f
713 f6fc62dde3c0771e29704af56ba4d8af77abcc2f
714 $ hg id --debug -r 4
714 $ hg id --debug -r 4
715 4efff6d98829d9c824c621afd6e3f01865f5439f
715 4efff6d98829d9c824c621afd6e3f01865f5439f
716 $ hg id --debug -r 5
716 $ hg id --debug -r 5
717 c922c0139ca03858f655e4a2af4dd02796a63969 tip Y
717 c922c0139ca03858f655e4a2af4dd02796a63969 tip Y
718 $ hg debugobsolete f6fc62dde3c0771e29704af56ba4d8af77abcc2f cccccccccccccccccccccccccccccccccccccccc
718 $ hg debugobsolete f6fc62dde3c0771e29704af56ba4d8af77abcc2f cccccccccccccccccccccccccccccccccccccccc
719 obsoleted 1 changesets
719 obsoleted 1 changesets
720 $ hg debugobsolete cccccccccccccccccccccccccccccccccccccccc 4efff6d98829d9c824c621afd6e3f01865f5439f
720 $ hg debugobsolete cccccccccccccccccccccccccccccccccccccccc 4efff6d98829d9c824c621afd6e3f01865f5439f
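(as an aside, not exercised by the recorded run: the marker chain written
above can be inspected with the core debug command

  hg debugsuccessorssets f6fc62dde3c0

which should resolve the chain through the unknown intermediate node and
report 4efff6d98829 as the surviving successor)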
721 $ hg push http://localhost:$HGPORT2/
721 $ hg push http://localhost:$HGPORT2/
722 pushing to http://localhost:$HGPORT2/
722 pushing to http://localhost:$HGPORT2/
723 searching for changes
723 searching for changes
724 remote: adding changesets
724 remote: adding changesets
725 remote: adding manifests
725 remote: adding manifests
726 remote: adding file changes
726 remote: adding file changes
727 remote: added 2 changesets with 2 changes to 1 files (+1 heads)
727 remote: added 2 changesets with 2 changes to 1 files (+1 heads)
728 remote: 2 new obsolescence markers
728 remote: 2 new obsolescence markers
729 remote: obsoleted 1 changesets
729 remote: obsoleted 1 changesets
730 updating bookmark Y
730 updating bookmark Y
731 $ hg -R ../a book
731 $ hg -R ../a book
732 @ 1:0d2164f0ce0d
732 @ 1:0d2164f0ce0d
733 * X 1:0d2164f0ce0d
733 * X 1:0d2164f0ce0d
734 Y 5:c922c0139ca0
734 Y 5:c922c0139ca0
735 Z 1:0d2164f0ce0d
735 Z 1:0d2164f0ce0d
736
736
737 hgweb
737 hgweb
738
738
739 $ cat <<EOF > .hg/hgrc
739 $ cat <<EOF > .hg/hgrc
740 > [web]
740 > [web]
741 > push_ssl = false
741 > push_ssl = false
742 > allow_push = *
742 > allow_push = *
743 > EOF
743 > EOF
744
744
745 $ hg serve -p $HGPORT -d --pid-file=../hg.pid -E errors.log
745 $ hg serve -p $HGPORT -d --pid-file=../hg.pid -E errors.log
746 $ cat ../hg.pid >> $DAEMON_PIDS
746 $ cat ../hg.pid >> $DAEMON_PIDS
747 $ cd ../a
747 $ cd ../a
748
748
749 $ hg debugpushkey http://localhost:$HGPORT/ namespaces
749 $ hg debugpushkey http://localhost:$HGPORT/ namespaces
750 bookmarks
750 bookmarks
751 namespaces
751 namespaces
752 obsolete
752 obsolete
753 phases
753 phases
754 $ hg debugpushkey http://localhost:$HGPORT/ bookmarks
754 $ hg debugpushkey http://localhost:$HGPORT/ bookmarks
755 @ 9b140be1080824d768c5a4691a564088eede71f9
755 @ 9b140be1080824d768c5a4691a564088eede71f9
756 X 9b140be1080824d768c5a4691a564088eede71f9
756 X 9b140be1080824d768c5a4691a564088eede71f9
757 Y c922c0139ca03858f655e4a2af4dd02796a63969
757 Y c922c0139ca03858f655e4a2af4dd02796a63969
758 Z 9b140be1080824d768c5a4691a564088eede71f9
758 Z 9b140be1080824d768c5a4691a564088eede71f9
759 foo 0000000000000000000000000000000000000000
759 foo 0000000000000000000000000000000000000000
760 foobar 9b140be1080824d768c5a4691a564088eede71f9
760 foobar 9b140be1080824d768c5a4691a564088eede71f9
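(for illustration only, not part of the recorded run: the same pushkey
protocol can be driven by hand; with five arguments debugpushkey attempts
the key update itself, e.g.

  hg debugpushkey http://localhost:$HGPORT/ bookmarks Z \
      9b140be1080824d768c5a4691a564088eede71f9 \
      0d2164f0ce0d8f1d6f94351eba04b794909be66c

would ask the server to move bookmark Z between those two nodes and report
whether it accepted the update)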
761 $ hg out -B http://localhost:$HGPORT/
761 $ hg out -B http://localhost:$HGPORT/
762 comparing with http://localhost:$HGPORT/
762 comparing with http://localhost:$HGPORT/
763 searching for changed bookmarks
763 searching for changed bookmarks
764 @ 0d2164f0ce0d
764 @ 0d2164f0ce0d
765 X 0d2164f0ce0d
765 X 0d2164f0ce0d
766 Z 0d2164f0ce0d
766 Z 0d2164f0ce0d
767 foo
767 foo
768 foobar
768 foobar
769 $ hg push -B Z http://localhost:$HGPORT/
769 $ hg push -B Z http://localhost:$HGPORT/
770 pushing to http://localhost:$HGPORT/
770 pushing to http://localhost:$HGPORT/
771 searching for changes
771 searching for changes
772 no changes found
772 no changes found
773 updating bookmark Z
773 updating bookmark Z
774 [1]
774 [1]
775 $ hg book -d Z
775 $ hg book -d Z
776 $ hg in -B http://localhost:$HGPORT/
776 $ hg in -B http://localhost:$HGPORT/
777 comparing with http://localhost:$HGPORT/
777 comparing with http://localhost:$HGPORT/
778 searching for changed bookmarks
778 searching for changed bookmarks
779 @ 9b140be10808
779 @ 9b140be10808
780 X 9b140be10808
780 X 9b140be10808
781 Z 0d2164f0ce0d
781 Z 0d2164f0ce0d
782 foo 000000000000
782 foo 000000000000
783 foobar 9b140be10808
783 foobar 9b140be10808
784 $ hg pull -B Z http://localhost:$HGPORT/
784 $ hg pull -B Z http://localhost:$HGPORT/
785 pulling from http://localhost:$HGPORT/
785 pulling from http://localhost:$HGPORT/
786 no changes found
786 no changes found
787 divergent bookmark @ stored as @1
787 divergent bookmark @ stored as @1
788 divergent bookmark X stored as X@1
788 divergent bookmark X stored as X@1
789 adding remote bookmark Z
789 adding remote bookmark Z
790 adding remote bookmark foo
790 adding remote bookmark foo
791 adding remote bookmark foobar
791 adding remote bookmark foobar
792 $ hg clone http://localhost:$HGPORT/ cloned-bookmarks
792 $ hg clone http://localhost:$HGPORT/ cloned-bookmarks
793 requesting all changes
793 requesting all changes
794 adding changesets
794 adding changesets
795 adding manifests
795 adding manifests
796 adding file changes
796 adding file changes
797 added 5 changesets with 5 changes to 3 files (+2 heads)
797 added 5 changesets with 5 changes to 3 files (+2 heads)
798 2 new obsolescence markers
798 2 new obsolescence markers
799 new changesets 4e3505fd9583:c922c0139ca0
799 new changesets 4e3505fd9583:c922c0139ca0
800 updating to bookmark @
800 updating to bookmark @
801 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
801 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
802 $ hg -R cloned-bookmarks bookmarks
802 $ hg -R cloned-bookmarks bookmarks
803 * @ 1:9b140be10808
803 * @ 1:9b140be10808
804 X 1:9b140be10808
804 X 1:9b140be10808
805 Y 4:c922c0139ca0
805 Y 4:c922c0139ca0
806 Z 2:0d2164f0ce0d
806 Z 2:0d2164f0ce0d
807 foo -1:000000000000
807 foo -1:000000000000
808 foobar 1:9b140be10808
808 foobar 1:9b140be10808
809
809
810 $ cd ..
810 $ cd ..
811
811
812 Test to show result of bookmarks comparison
812 Test to show result of bookmarks comparison
813
813
814 $ mkdir bmcomparison
814 $ mkdir bmcomparison
815 $ cd bmcomparison
815 $ cd bmcomparison
816
816
817 $ hg init source
817 $ hg init source
818 $ hg -R source debugbuilddag '+2*2*3*4'
818 $ hg -R source debugbuilddag '+2*2*3*4'
819 $ hg -R source log -G --template '{rev}:{node|short}'
819 $ hg -R source log -G --template '{rev}:{node|short}'
820 o 4:e7bd5218ca15
820 o 4:e7bd5218ca15
821 |
821 |
822 | o 3:6100d3090acf
822 | o 3:6100d3090acf
823 |/
823 |/
824 | o 2:fa942426a6fd
824 | o 2:fa942426a6fd
825 |/
825 |/
826 | o 1:66f7d451a68b
826 | o 1:66f7d451a68b
827 |/
827 |/
828 o 0:1ea73414a91b
828 o 0:1ea73414a91b
829
829
830 $ hg -R source bookmarks -r 0 SAME
830 $ hg -R source bookmarks -r 0 SAME
831 $ hg -R source bookmarks -r 0 ADV_ON_REPO1
831 $ hg -R source bookmarks -r 0 ADV_ON_REPO1
832 $ hg -R source bookmarks -r 0 ADV_ON_REPO2
832 $ hg -R source bookmarks -r 0 ADV_ON_REPO2
833 $ hg -R source bookmarks -r 0 DIFF_ADV_ON_REPO1
833 $ hg -R source bookmarks -r 0 DIFF_ADV_ON_REPO1
834 $ hg -R source bookmarks -r 0 DIFF_ADV_ON_REPO2
834 $ hg -R source bookmarks -r 0 DIFF_ADV_ON_REPO2
835 $ hg -R source bookmarks -r 1 DIVERGED
835 $ hg -R source bookmarks -r 1 DIVERGED
836
836
837 $ hg clone -U source repo1
837 $ hg clone -U source repo1
838
838
839 (test that incoming/outgoing exit with 1 if there are no bookmarks to
839 (test that incoming/outgoing exit with 1 if there are no bookmarks to
840 be exchanged)
840 be exchanged)
841
841
842 $ hg -R repo1 incoming -B
842 $ hg -R repo1 incoming -B
843 comparing with $TESTTMP/bmcomparison/source
843 comparing with $TESTTMP/bmcomparison/source
844 searching for changed bookmarks
844 searching for changed bookmarks
845 no changed bookmarks found
845 no changed bookmarks found
846 [1]
846 [1]
847 $ hg -R repo1 outgoing -B
847 $ hg -R repo1 outgoing -B
848 comparing with $TESTTMP/bmcomparison/source
848 comparing with $TESTTMP/bmcomparison/source
849 searching for changed bookmarks
849 searching for changed bookmarks
850 no changed bookmarks found
850 no changed bookmarks found
851 [1]
851 [1]
852
852
853 $ hg -R repo1 bookmarks -f -r 1 ADD_ON_REPO1
853 $ hg -R repo1 bookmarks -f -r 1 ADD_ON_REPO1
854 $ hg -R repo1 bookmarks -f -r 2 ADV_ON_REPO1
854 $ hg -R repo1 bookmarks -f -r 2 ADV_ON_REPO1
855 $ hg -R repo1 bookmarks -f -r 3 DIFF_ADV_ON_REPO1
855 $ hg -R repo1 bookmarks -f -r 3 DIFF_ADV_ON_REPO1
856 $ hg -R repo1 bookmarks -f -r 3 DIFF_DIVERGED
856 $ hg -R repo1 bookmarks -f -r 3 DIFF_DIVERGED
857 $ hg -R repo1 -q --config extensions.mq= strip 4
857 $ hg -R repo1 -q --config extensions.mq= strip 4
858 $ hg -R repo1 log -G --template '{node|short} ({bookmarks})'
858 $ hg -R repo1 log -G --template '{node|short} ({bookmarks})'
859 o 6100d3090acf (DIFF_ADV_ON_REPO1 DIFF_DIVERGED)
859 o 6100d3090acf (DIFF_ADV_ON_REPO1 DIFF_DIVERGED)
860 |
860 |
861 | o fa942426a6fd (ADV_ON_REPO1)
861 | o fa942426a6fd (ADV_ON_REPO1)
862 |/
862 |/
863 | o 66f7d451a68b (ADD_ON_REPO1 DIVERGED)
863 | o 66f7d451a68b (ADD_ON_REPO1 DIVERGED)
864 |/
864 |/
865 o 1ea73414a91b (ADV_ON_REPO2 DIFF_ADV_ON_REPO2 SAME)
865 o 1ea73414a91b (ADV_ON_REPO2 DIFF_ADV_ON_REPO2 SAME)
866
866
867
867
868 $ hg clone -U source repo2
868 $ hg clone -U source repo2
869 $ hg -R repo2 bookmarks -f -r 1 ADD_ON_REPO2
869 $ hg -R repo2 bookmarks -f -r 1 ADD_ON_REPO2
870 $ hg -R repo2 bookmarks -f -r 1 ADV_ON_REPO2
870 $ hg -R repo2 bookmarks -f -r 1 ADV_ON_REPO2
871 $ hg -R repo2 bookmarks -f -r 2 DIVERGED
871 $ hg -R repo2 bookmarks -f -r 2 DIVERGED
872 $ hg -R repo2 bookmarks -f -r 4 DIFF_ADV_ON_REPO2
872 $ hg -R repo2 bookmarks -f -r 4 DIFF_ADV_ON_REPO2
873 $ hg -R repo2 bookmarks -f -r 4 DIFF_DIVERGED
873 $ hg -R repo2 bookmarks -f -r 4 DIFF_DIVERGED
874 $ hg -R repo2 -q --config extensions.mq= strip 3
874 $ hg -R repo2 -q --config extensions.mq= strip 3
875 $ hg -R repo2 log -G --template '{node|short} ({bookmarks})'
875 $ hg -R repo2 log -G --template '{node|short} ({bookmarks})'
876 o e7bd5218ca15 (DIFF_ADV_ON_REPO2 DIFF_DIVERGED)
876 o e7bd5218ca15 (DIFF_ADV_ON_REPO2 DIFF_DIVERGED)
877 |
877 |
878 | o fa942426a6fd (DIVERGED)
878 | o fa942426a6fd (DIVERGED)
879 |/
879 |/
880 | o 66f7d451a68b (ADD_ON_REPO2 ADV_ON_REPO2)
880 | o 66f7d451a68b (ADD_ON_REPO2 ADV_ON_REPO2)
881 |/
881 |/
882 o 1ea73414a91b (ADV_ON_REPO1 DIFF_ADV_ON_REPO1 SAME)
882 o 1ea73414a91b (ADV_ON_REPO1 DIFF_ADV_ON_REPO1 SAME)
883
883
884
884
885 (test that differences of bookmarks between repositories are fully shown)
885 (test that differences of bookmarks between repositories are fully shown)
886
886
887 $ hg -R repo1 incoming -B repo2 -v
887 $ hg -R repo1 incoming -B repo2 -v
888 comparing with repo2
888 comparing with repo2
889 searching for changed bookmarks
889 searching for changed bookmarks
890 ADD_ON_REPO2 66f7d451a68b added
890 ADD_ON_REPO2 66f7d451a68b added
891 ADV_ON_REPO2 66f7d451a68b advanced
891 ADV_ON_REPO2 66f7d451a68b advanced
892 DIFF_ADV_ON_REPO2 e7bd5218ca15 changed
892 DIFF_ADV_ON_REPO2 e7bd5218ca15 changed
893 DIFF_DIVERGED e7bd5218ca15 changed
893 DIFF_DIVERGED e7bd5218ca15 changed
894 DIVERGED fa942426a6fd diverged
894 DIVERGED fa942426a6fd diverged
895 $ hg -R repo1 outgoing -B repo2 -v
895 $ hg -R repo1 outgoing -B repo2 -v
896 comparing with repo2
896 comparing with repo2
897 searching for changed bookmarks
897 searching for changed bookmarks
898 ADD_ON_REPO1 66f7d451a68b added
898 ADD_ON_REPO1 66f7d451a68b added
899 ADD_ON_REPO2 deleted
899 ADD_ON_REPO2 deleted
900 ADV_ON_REPO1 fa942426a6fd advanced
900 ADV_ON_REPO1 fa942426a6fd advanced
901 DIFF_ADV_ON_REPO1 6100d3090acf advanced
901 DIFF_ADV_ON_REPO1 6100d3090acf advanced
902 DIFF_ADV_ON_REPO2 1ea73414a91b changed
902 DIFF_ADV_ON_REPO2 1ea73414a91b changed
903 DIFF_DIVERGED 6100d3090acf changed
903 DIFF_DIVERGED 6100d3090acf changed
904 DIVERGED 66f7d451a68b diverged
904 DIVERGED 66f7d451a68b diverged
905
905
906 $ hg -R repo2 incoming -B repo1 -v
906 $ hg -R repo2 incoming -B repo1 -v
907 comparing with repo1
907 comparing with repo1
908 searching for changed bookmarks
908 searching for changed bookmarks
909 ADD_ON_REPO1 66f7d451a68b added
909 ADD_ON_REPO1 66f7d451a68b added
910 ADV_ON_REPO1 fa942426a6fd advanced
910 ADV_ON_REPO1 fa942426a6fd advanced
911 DIFF_ADV_ON_REPO1 6100d3090acf changed
911 DIFF_ADV_ON_REPO1 6100d3090acf changed
912 DIFF_DIVERGED 6100d3090acf changed
912 DIFF_DIVERGED 6100d3090acf changed
913 DIVERGED 66f7d451a68b diverged
913 DIVERGED 66f7d451a68b diverged
914 $ hg -R repo2 outgoing -B repo1 -v
914 $ hg -R repo2 outgoing -B repo1 -v
915 comparing with repo1
915 comparing with repo1
916 searching for changed bookmarks
916 searching for changed bookmarks
917 ADD_ON_REPO1 deleted
917 ADD_ON_REPO1 deleted
918 ADD_ON_REPO2 66f7d451a68b added
918 ADD_ON_REPO2 66f7d451a68b added
919 ADV_ON_REPO2 66f7d451a68b advanced
919 ADV_ON_REPO2 66f7d451a68b advanced
920 DIFF_ADV_ON_REPO1 1ea73414a91b changed
920 DIFF_ADV_ON_REPO1 1ea73414a91b changed
921 DIFF_ADV_ON_REPO2 e7bd5218ca15 advanced
921 DIFF_ADV_ON_REPO2 e7bd5218ca15 advanced
922 DIFF_DIVERGED e7bd5218ca15 changed
922 DIFF_DIVERGED e7bd5218ca15 changed
923 DIVERGED fa942426a6fd diverged
923 DIVERGED fa942426a6fd diverged
924
924
925 $ cd ..
925 $ cd ..
926
926
927 Pushing a bookmark should only push the changes required by that
927 Pushing a bookmark should only push the changes required by that
928 bookmark, not all outgoing changes:
928 bookmark, not all outgoing changes:
929 $ hg clone http://localhost:$HGPORT/ addmarks
929 $ hg clone http://localhost:$HGPORT/ addmarks
930 requesting all changes
930 requesting all changes
931 adding changesets
931 adding changesets
932 adding manifests
932 adding manifests
933 adding file changes
933 adding file changes
934 added 5 changesets with 5 changes to 3 files (+2 heads)
934 added 5 changesets with 5 changes to 3 files (+2 heads)
935 2 new obsolescence markers
935 2 new obsolescence markers
936 new changesets 4e3505fd9583:c922c0139ca0
936 new changesets 4e3505fd9583:c922c0139ca0
937 updating to bookmark @
937 updating to bookmark @
938 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
938 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
939 $ cd addmarks
939 $ cd addmarks
940 $ echo foo > foo
940 $ echo foo > foo
941 $ hg add foo
941 $ hg add foo
942 $ hg commit -m 'add foo'
942 $ hg commit -m 'add foo'
943 $ echo bar > bar
943 $ echo bar > bar
944 $ hg add bar
944 $ hg add bar
945 $ hg commit -m 'add bar'
945 $ hg commit -m 'add bar'
946 $ hg co "tip^"
946 $ hg co "tip^"
947 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
947 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
948 (leaving bookmark @)
948 (leaving bookmark @)
949 $ hg book add-foo
949 $ hg book add-foo
950 $ hg book -r tip add-bar
950 $ hg book -r tip add-bar
951 Note: this push *must* push only a single changeset, as that's the point
951 Note: this push *must* push only a single changeset, as that's the point
952 of this test.
952 of this test.
953 $ hg push -B add-foo --traceback
953 $ hg push -B add-foo --traceback
954 pushing to http://localhost:$HGPORT/
954 pushing to http://localhost:$HGPORT/
955 searching for changes
955 searching for changes
956 remote: adding changesets
956 remote: adding changesets
957 remote: adding manifests
957 remote: adding manifests
958 remote: adding file changes
958 remote: adding file changes
959 remote: added 1 changesets with 1 changes to 1 files
959 remote: added 1 changesets with 1 changes to 1 files
960 exporting bookmark add-foo
960 exporting bookmark add-foo
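(one way to see that 'add bar' stayed local, not exercised by the recorded
test, is to ask for the remaining outgoing changes afterwards, e.g.

  hg outgoing -r add-bar http://localhost:$HGPORT/

which should still list the 'add bar' changeset)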
961
961
962 pushing a new bookmark on a new head does not require -f if -B is specified
962 pushing a new bookmark on a new head does not require -f if -B is specified
963
963
964 $ hg up -q X
964 $ hg up -q X
965 $ hg book W
965 $ hg book W
966 $ echo c5 > f2
966 $ echo c5 > f2
967 $ hg ci -Am5
967 $ hg ci -Am5
968 created new head
968 created new head
969 $ hg push -B .
969 $ hg push -B .
970 pushing to http://localhost:$HGPORT/
970 pushing to http://localhost:$HGPORT/
971 searching for changes
971 searching for changes
972 remote: adding changesets
972 remote: adding changesets
973 remote: adding manifests
973 remote: adding manifests
974 remote: adding file changes
974 remote: adding file changes
975 remote: added 1 changesets with 1 changes to 1 files (+1 heads)
975 remote: added 1 changesets with 1 changes to 1 files (+1 heads)
976 exporting bookmark W
976 exporting bookmark W
977 $ hg -R ../b id -r W
977 $ hg -R ../b id -r W
978 cc978a373a53 tip W
978 cc978a373a53 tip W
979
979
980 pushing an existing but divergent bookmark with -B still requires -f
980 pushing an existing but divergent bookmark with -B still requires -f
981
981
982 $ hg clone -q . ../r
982 $ hg clone -q . ../r
983 $ hg up -q X
983 $ hg up -q X
984 $ echo 1 > f2
984 $ echo 1 > f2
985 $ hg ci -qAml
985 $ hg ci -qAml
986
986
987 $ cd ../r
987 $ cd ../r
988 $ hg up -q X
988 $ hg up -q X
989 $ echo 2 > f2
989 $ echo 2 > f2
990 $ hg ci -qAmr
990 $ hg ci -qAmr
991 $ hg push -B X
991 $ hg push -B X
992 pushing to $TESTTMP/addmarks
992 pushing to $TESTTMP/addmarks
993 searching for changes
993 searching for changes
994 remote has heads on branch 'default' that are not known locally: a2a606d9ff1b
994 remote has heads on branch 'default' that are not known locally: a2a606d9ff1b
995 abort: push creates new remote head 54694f811df9 with bookmark 'X'!
995 abort: push creates new remote head 54694f811df9 with bookmark 'X'!
996 (pull and merge or see 'hg help push' for details about pushing new heads)
996 (pull and merge or see 'hg help push' for details about pushing new heads)
997 [255]
997 [255]
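(not exercised here: the refusal above only concerns plain -B; the
divergent head could still be pushed deliberately with

  hg push -f -B X

or resolved properly by pulling and merging first)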
998 $ cd ../addmarks
998 $ cd ../addmarks
999
999
1000 Check summary output for incoming/outgoing bookmarks
1000 Check summary output for incoming/outgoing bookmarks
1001
1001
1002 $ hg bookmarks -d X
1002 $ hg bookmarks -d X
1003 $ hg bookmarks -d Y
1003 $ hg bookmarks -d Y
1004 $ hg summary --remote | grep '^remote:'
1004 $ hg summary --remote | grep '^remote:'
1005 remote: *, 2 incoming bookmarks, 1 outgoing bookmarks (glob)
1005 remote: *, 2 incoming bookmarks, 1 outgoing bookmarks (glob)
1006
1006
1007 $ cd ..
1007 $ cd ..
1008
1008
1009 pushing an unchanged bookmark should result in no changes
1009 pushing an unchanged bookmark should result in no changes
1010
1010
1011 $ hg init unchanged-a
1011 $ hg init unchanged-a
1012 $ hg init unchanged-b
1012 $ hg init unchanged-b
1013 $ cd unchanged-a
1013 $ cd unchanged-a
1014 $ echo initial > foo
1014 $ echo initial > foo
1015 $ hg commit -A -m initial
1015 $ hg commit -A -m initial
1016 adding foo
1016 adding foo
1017 $ hg bookmark @
1017 $ hg bookmark @
1018 $ hg push -B @ ../unchanged-b
1018 $ hg push -B @ ../unchanged-b
1019 pushing to ../unchanged-b
1019 pushing to ../unchanged-b
1020 searching for changes
1020 searching for changes
1021 adding changesets
1021 adding changesets
1022 adding manifests
1022 adding manifests
1023 adding file changes
1023 adding file changes
1024 added 1 changesets with 1 changes to 1 files
1024 added 1 changesets with 1 changes to 1 files
1025 exporting bookmark @
1025 exporting bookmark @
1026
1026
1027 $ hg push -B @ ../unchanged-b
1027 $ hg push -B @ ../unchanged-b
1028 pushing to ../unchanged-b
1028 pushing to ../unchanged-b
1029 searching for changes
1029 searching for changes
1030 no changes found
1030 no changes found
1031 [1]
1031 [1]
1032
1032
1033 Pushing a really long bookmark should work fine (issue5165)
1033 Pushing a really long bookmark should work fine (issue5165)
1034 ===========================================================
1034 ===========================================================
1035
1035
1036 #if b2-binary
1036 #if b2-binary
1037 >>> with open('longname', 'w') as f:
1037 >>> with open('longname', 'w') as f:
1038 ... f.write('wat' * 100) and None
1038 ... f.write('wat' * 100) and None
1039 $ hg book `cat longname`
1039 $ hg book `cat longname`
1040 $ hg push -B `cat longname` ../unchanged-b
1040 $ hg push -B `cat longname` ../unchanged-b
1041 pushing to ../unchanged-b
1041 pushing to ../unchanged-b
1042 searching for changes
1042 searching for changes
1043 no changes found
1043 no changes found
1044 exporting bookmark (wat){100} (re)
1044 exporting bookmark (wat){100} (re)
1045 [1]
1045 [1]
1046 $ hg -R ../unchanged-b book --delete `cat longname`
1046 $ hg -R ../unchanged-b book --delete `cat longname`
1047
1047
1048 Test again, but forcing bundle1 exchange to make sure that doesn't regress.
1048 Test again, but forcing bundle1 exchange to make sure that doesn't regress.
1049
1049
1050 $ hg push -B `cat longname` ../unchanged-b --config devel.legacy.exchange=bundle1
1050 $ hg push -B `cat longname` ../unchanged-b --config devel.legacy.exchange=bundle1
1051 pushing to ../unchanged-b
1051 pushing to ../unchanged-b
1052 searching for changes
1052 searching for changes
1053 no changes found
1053 no changes found
1054 exporting bookmark (wat){100} (re)
1054 exporting bookmark (wat){100} (re)
1055 [1]
1055 [1]
1056 $ hg -R ../unchanged-b book --delete `cat longname`
1056 $ hg -R ../unchanged-b book --delete `cat longname`
1057 $ hg book --delete `cat longname`
1057 $ hg book --delete `cat longname`
1058 $ hg co @
1058 $ hg co @
1059 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1059 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1060 (activating bookmark @)
1060 (activating bookmark @)
1061 #endif
1061 #endif
1062
1062
1063 Check hook preventing push (issue4455)
1063 Check hook preventing push (issue4455)
1064 ======================================
1064 ======================================
1065
1065
1066 $ hg bookmarks
1066 $ hg bookmarks
1067 * @ 0:55482a6fb4b1
1067 * @ 0:55482a6fb4b1
1068 $ hg log -G
1068 $ hg log -G
1069 @ 0:55482a6fb4b1 initial
1069 @ 0:55482a6fb4b1 initial
1070
1070
1071 $ hg init ../issue4455-dest
1071 $ hg init ../issue4455-dest
1072 $ hg push ../issue4455-dest # changesets only
1072 $ hg push ../issue4455-dest # changesets only
1073 pushing to ../issue4455-dest
1073 pushing to ../issue4455-dest
1074 searching for changes
1074 searching for changes
1075 adding changesets
1075 adding changesets
1076 adding manifests
1076 adding manifests
1077 adding file changes
1077 adding file changes
1078 added 1 changesets with 1 changes to 1 files
1078 added 1 changesets with 1 changes to 1 files
1079 $ cat >> .hg/hgrc << EOF
1079 $ cat >> .hg/hgrc << EOF
1080 > [paths]
1080 > [paths]
1081 > local=../issue4455-dest/
1081 > local=../issue4455-dest/
1082 > ssh=ssh://user@dummy/issue4455-dest
1082 > ssh=ssh://user@dummy/issue4455-dest
1083 > http=http://localhost:$HGPORT/
1083 > http=http://localhost:$HGPORT/
1084 > [ui]
1084 > [ui]
1085 > ssh=$PYTHON "$TESTDIR/dummyssh"
1085 > ssh=$PYTHON "$TESTDIR/dummyssh"
1086 > EOF
1086 > EOF
1087 $ cat >> ../issue4455-dest/.hg/hgrc << EOF
1087 $ cat >> ../issue4455-dest/.hg/hgrc << EOF
1088 > [hooks]
1088 > [hooks]
1089 > prepushkey=false
1089 > prepushkey=false
1090 > [web]
1090 > [web]
1091 > push_ssl = false
1091 > push_ssl = false
1092 > allow_push = *
1092 > allow_push = *
1093 > EOF
1093 > EOF
1094 $ killdaemons.py
1094 $ killdaemons.py
1095 $ hg serve -R ../issue4455-dest -p $HGPORT -d --pid-file=../issue4455.pid -E ../issue4455-error.log
1095 $ hg serve -R ../issue4455-dest -p $HGPORT -d --pid-file=../issue4455.pid -E ../issue4455-error.log
1096 $ cat ../issue4455.pid >> $DAEMON_PIDS
1096 $ cat ../issue4455.pid >> $DAEMON_PIDS
1097
1097
1098 Local push
1098 Local push
1099 ----------
1099 ----------
1100
1100
1101 #if b2-pushkey
1101 #if b2-pushkey
1102
1102
1103 $ hg push -B @ local
1103 $ hg push -B @ local
1104 pushing to $TESTTMP/issue4455-dest
1104 pushing to $TESTTMP/issue4455-dest
1105 searching for changes
1105 searching for changes
1106 no changes found
1106 no changes found
1107 pushkey-abort: prepushkey hook exited with status 1
1107 pushkey-abort: prepushkey hook exited with status 1
1108 abort: exporting bookmark @ failed!
1108 abort: exporting bookmark @ failed!
1109 [255]
1109 [255]
1110
1110
1111 #endif
1111 #endif
1112 #if b2-binary
1112 #if b2-binary
1113
1113
1114 $ hg push -B @ local
1114 $ hg push -B @ local
1115 pushing to $TESTTMP/issue4455-dest
1115 pushing to $TESTTMP/issue4455-dest
1116 searching for changes
1116 searching for changes
1117 no changes found
1117 no changes found
1118 abort: prepushkey hook exited with status 1
1118 abort: prepushkey hook exited with status 1
1119 [255]
1119 [255]
1120
1120
1121 #endif
1121 #endif
1122
1122
1123 $ hg -R ../issue4455-dest/ bookmarks
1123 $ hg -R ../issue4455-dest/ bookmarks
1124 no bookmarks set
1124 no bookmarks set
1125
1125
1126 Using ssh
1126 Using ssh
1127 ---------
1127 ---------
1128
1128
1129 #if b2-pushkey
1129 #if b2-pushkey
1130
1130
1131 $ hg push -B @ ssh # bundle2+
1131 $ hg push -B @ ssh # bundle2+
1132 pushing to ssh://user@dummy/issue4455-dest
1132 pushing to ssh://user@dummy/issue4455-dest
1133 searching for changes
1133 searching for changes
1134 no changes found
1134 no changes found
1135 remote: pushkey-abort: prepushkey hook exited with status 1
1135 remote: pushkey-abort: prepushkey hook exited with status 1
1136 abort: exporting bookmark @ failed!
1136 abort: exporting bookmark @ failed!
1137 [255]
1137 [255]
1138
1138
1139 $ hg -R ../issue4455-dest/ bookmarks
1139 $ hg -R ../issue4455-dest/ bookmarks
1140 no bookmarks set
1140 no bookmarks set
1141
1141
1142 $ hg push -B @ ssh --config devel.legacy.exchange=bundle1
1142 $ hg push -B @ ssh --config devel.legacy.exchange=bundle1
1143 pushing to ssh://user@dummy/issue4455-dest
1143 pushing to ssh://user@dummy/issue4455-dest
1144 searching for changes
1144 searching for changes
1145 no changes found
1145 no changes found
1146 remote: pushkey-abort: prepushkey hook exited with status 1
1146 remote: pushkey-abort: prepushkey hook exited with status 1
1147 exporting bookmark @ failed!
1147 exporting bookmark @ failed!
1148 [1]
1148 [1]
1149
1149
1150 #endif
1150 #endif
1151 #if b2-binary
1151 #if b2-binary
1152
1152
1153 $ hg push -B @ ssh # bundle2+
1153 $ hg push -B @ ssh # bundle2+
1154 pushing to ssh://user@dummy/issue4455-dest
1154 pushing to ssh://user@dummy/issue4455-dest
1155 searching for changes
1155 searching for changes
1156 no changes found
1156 no changes found
1157 remote: prepushkey hook exited with status 1
1157 remote: prepushkey hook exited with status 1
1158 abort: push failed on remote
1158 abort: push failed on remote
1159 [255]
1159 [255]
1160
1160
1161 #endif
1161 #endif
1162
1162
1163 $ hg -R ../issue4455-dest/ bookmarks
1163 $ hg -R ../issue4455-dest/ bookmarks
1164 no bookmarks set
1164 no bookmarks set
1165
1165
1166 Using http
1166 Using http
1167 ----------
1167 ----------
1168
1168
1169 #if b2-pushkey
1169 #if b2-pushkey
1170 $ hg push -B @ http # bundle2+
1170 $ hg push -B @ http # bundle2+
1171 pushing to http://localhost:$HGPORT/
1171 pushing to http://localhost:$HGPORT/
1172 searching for changes
1172 searching for changes
1173 no changes found
1173 no changes found
1174 remote: pushkey-abort: prepushkey hook exited with status 1
1174 remote: pushkey-abort: prepushkey hook exited with status 1
1175 abort: exporting bookmark @ failed!
1175 abort: exporting bookmark @ failed!
1176 [255]
1176 [255]
1177
1177
1178 $ hg -R ../issue4455-dest/ bookmarks
1178 $ hg -R ../issue4455-dest/ bookmarks
1179 no bookmarks set
1179 no bookmarks set
1180
1180
1181 $ hg push -B @ http --config devel.legacy.exchange=bundle1
1181 $ hg push -B @ http --config devel.legacy.exchange=bundle1
1182 pushing to http://localhost:$HGPORT/
1182 pushing to http://localhost:$HGPORT/
1183 searching for changes
1183 searching for changes
1184 no changes found
1184 no changes found
1185 remote: pushkey-abort: prepushkey hook exited with status 1
1185 remote: pushkey-abort: prepushkey hook exited with status 1
1186 exporting bookmark @ failed!
1186 exporting bookmark @ failed!
1187 [1]
1187 [1]
1188
1188
1189 #endif
1189 #endif
1190
1190
1191 #if b2-binary
1191 #if b2-binary
1192
1192
1193 $ hg push -B @ ssh # bundle2+
1193 $ hg push -B @ ssh # bundle2+
1194 pushing to ssh://user@dummy/issue4455-dest
1194 pushing to ssh://user@dummy/issue4455-dest
1195 searching for changes
1195 searching for changes
1196 no changes found
1196 no changes found
1197 remote: prepushkey hook exited with status 1
1197 remote: prepushkey hook exited with status 1
1198 abort: push failed on remote
1198 abort: push failed on remote
1199 [255]
1199 [255]
1200
1200
1201 #endif
1201 #endif
1202
1202
1203 $ hg -R ../issue4455-dest/ bookmarks
1203 $ hg -R ../issue4455-dest/ bookmarks
1204 no bookmarks set
1204 no bookmarks set
1205
1205
1206 $ cd ..
1206 $ cd ..
1207
1207
1208 Test that pre-pushkey compat for bookmarks works as expected (issue5777)
1208 Test that pre-pushkey compat for bookmarks works as expected (issue5777)
1209
1209
1210 $ cat << EOF >> $HGRCPATH
1210 $ cat << EOF >> $HGRCPATH
1211 > [ui]
1211 > [ui]
1212 > ssh="$PYTHON" "$TESTDIR/dummyssh"
1212 > ssh="$PYTHON" "$TESTDIR/dummyssh"
1213 > [server]
1213 > [server]
1214 > bookmarks-pushkey-compat = yes
1214 > bookmarks-pushkey-compat = yes
1215 > EOF
1215 > EOF
1216
1216
1217 $ hg init server
1217 $ hg init server
1218 $ echo foo > server/a
1218 $ echo foo > server/a
1219 $ hg -R server book foo
1219 $ hg -R server book foo
1220 $ hg -R server commit -Am a
1220 $ hg -R server commit -Am a
1221 adding a
1221 adding a
1222 $ hg clone ssh://user@dummy/server client
1222 $ hg clone ssh://user@dummy/server client
1223 requesting all changes
1223 requesting all changes
1224 adding changesets
1224 adding changesets
1225 adding manifests
1225 adding manifests
1226 adding file changes
1226 adding file changes
1227 added 1 changesets with 1 changes to 1 files
1227 added 1 changesets with 1 changes to 1 files
1228 new changesets 79513d0d7716
1228 new changesets 79513d0d7716
1229 updating to branch default
1229 updating to branch default
1230 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1230 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1231
1231
1232 Forbid bookmark move on the server
1232 Forbid bookmark move on the server
1233
1233
1234 $ cat << EOF >> $TESTDIR/no-bm-move.sh
1234 $ cat << EOF >> $TESTDIR/no-bm-move.sh
1235 > #!/bin/sh
1235 > #!/bin/sh
1236 > echo \$HG_NAMESPACE | grep -v bookmarks
1236 > echo \$HG_NAMESPACE | grep -v bookmarks
1237 > EOF
1237 > EOF
1238 $ cat << EOF >> server/.hg/hgrc
1238 $ cat << EOF >> server/.hg/hgrc
1239 > [hooks]
1239 > [hooks]
1240 > prepushkey.no-bm-move= sh $TESTDIR/no-bm-move.sh
1240 > prepushkey.no-bm-move= sh $TESTDIR/no-bm-move.sh
1241 > EOF
1241 > EOF
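(for illustration, not part of this test: pushkey hooks also receive
HG_KEY, HG_OLD and HG_NEW, so a server could be selective instead of
rejecting the whole namespace)

A sketch of such a hook, pinning only the bookmark "foo":

  #!/bin/sh
  # reject moves of the bookmark named "foo", allow everything else
  if [ "$HG_NAMESPACE" = "bookmarks" ] && [ "$HG_KEY" = "foo" ]; then
      exit 1
  fi
  exit 0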
1242
1242
1243 pushing a changeset is okay
1243 pushing a changeset is okay
1244
1244
1245 $ echo bar >> client/a
1245 $ echo bar >> client/a
1246 $ hg -R client commit -m b
1246 $ hg -R client commit -m b
1247 $ hg -R client push
1247 $ hg -R client push
1248 pushing to ssh://user@dummy/server
1248 pushing to ssh://user@dummy/server
1249 searching for changes
1249 searching for changes
1250 remote: adding changesets
1250 remote: adding changesets
1251 remote: adding manifests
1251 remote: adding manifests
1252 remote: adding file changes
1252 remote: adding file changes
1253 remote: added 1 changesets with 1 changes to 1 files
1253 remote: added 1 changesets with 1 changes to 1 files
1254
1254
1255 attempt to move the bookmark is rejected
1255 attempt to move the bookmark is rejected
1256
1256
1257 $ hg -R client book foo -r .
1257 $ hg -R client book foo -r .
1258 moving bookmark 'foo' forward from 79513d0d7716
1258 moving bookmark 'foo' forward from 79513d0d7716
1259
1259
1260 #if b2-pushkey
1260 #if b2-pushkey
1261 $ hg -R client push
1261 $ hg -R client push
1262 pushing to ssh://user@dummy/server
1262 pushing to ssh://user@dummy/server
1263 searching for changes
1263 searching for changes
1264 no changes found
1264 no changes found
1265 remote: pushkey-abort: prepushkey.no-bm-move hook exited with status 1
1265 remote: pushkey-abort: prepushkey.no-bm-move hook exited with status 1
1266 abort: updating bookmark foo failed!
1266 abort: updating bookmark foo failed!
1267 [255]
1267 [255]
1268 #endif
1268 #endif
1269 #if b2-binary
1269 #if b2-binary
1270 $ hg -R client push
1270 $ hg -R client push
1271 pushing to ssh://user@dummy/server
1271 pushing to ssh://user@dummy/server
1272 searching for changes
1272 searching for changes
1273 no changes found
1273 no changes found
1274 remote: prepushkey.no-bm-move hook exited with status 1
1274 remote: prepushkey.no-bm-move hook exited with status 1
1275 abort: push failed on remote
1275 abort: push failed on remote
1276 [255]
1276 [255]
1277 #endif
1277 #endif
@@ -1,215 +1,220 b''
1 #require test-repo
1 #require test-repo
2
2
3 Set vars:
3 Set vars:
4
4
5 $ . "$TESTDIR/helpers-testrepo.sh"
5 $ . "$TESTDIR/helpers-testrepo.sh"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
6 $ CONTRIBDIR="$TESTDIR/../contrib"
7
7
8 Prepare repo:
8 Prepare repo:
9
9
10 $ hg init
10 $ hg init
11
11
12 $ echo this is file a > a
12 $ echo this is file a > a
13 $ hg add a
13 $ hg add a
14 $ hg commit -m first
14 $ hg commit -m first
15
15
16 $ echo adding to file a >> a
16 $ echo adding to file a >> a
17 $ hg commit -m second
17 $ hg commit -m second
18
18
19 $ echo adding more to file a >> a
19 $ echo adding more to file a >> a
20 $ hg commit -m third
20 $ hg commit -m third
21
21
22 $ hg up -r 0
22 $ hg up -r 0
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 $ echo merge-this >> a
24 $ echo merge-this >> a
25 $ hg commit -m merge-able
25 $ hg commit -m merge-able
26 created new head
26 created new head
27
27
28 $ hg up -r 2
28 $ hg up -r 2
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30
30
31 perfstatus
31 perfstatus
32
32
33 $ cat >> $HGRCPATH << EOF
33 $ cat >> $HGRCPATH << EOF
34 > [extensions]
34 > [extensions]
35 > perfstatusext=$CONTRIBDIR/perf.py
35 > perfstatusext=$CONTRIBDIR/perf.py
36 > [perf]
36 > [perf]
37 > presleep=0
37 > presleep=0
38 > stub=on
38 > stub=on
39 > parentscount=1
39 > parentscount=1
40 > EOF
40 > EOF
41 $ hg help perfstatusext
41 $ hg help perfstatusext
42 perfstatusext extension - helper extension to measure performance
42 perfstatusext extension - helper extension to measure performance
43
43
44 list of commands:
44 list of commands:
45
45
46 perfaddremove
46 perfaddremove
47 (no help text available)
47 (no help text available)
48 perfancestors
48 perfancestors
49 (no help text available)
49 (no help text available)
50 perfancestorset
50 perfancestorset
51 (no help text available)
51 (no help text available)
52 perfannotate (no help text available)
52 perfannotate (no help text available)
53 perfbdiff benchmark a bdiff between revisions
53 perfbdiff benchmark a bdiff between revisions
54 perfbookmarks
54 perfbookmarks
55 benchmark parsing bookmarks from disk to memory
55 benchmark parsing bookmarks from disk to memory
56 perfbranchmap
56 perfbranchmap
57 benchmark the update of a branchmap
57 benchmark the update of a branchmap
58 perfbranchmapload
58 perfbranchmapload
59 benchmark reading the branchmap
59 benchmark reading the branchmap
60 perfbundleread
60 perfbundleread
61 Benchmark reading of bundle files.
61 Benchmark reading of bundle files.
62 perfcca (no help text available)
62 perfcca (no help text available)
63 perfchangegroupchangelog
63 perfchangegroupchangelog
64 Benchmark producing a changelog group for a changegroup.
64 Benchmark producing a changelog group for a changegroup.
65 perfchangeset
65 perfchangeset
66 (no help text available)
66 (no help text available)
67 perfctxfiles (no help text available)
67 perfctxfiles (no help text available)
68 perfdiffwd Profile diff of working directory changes
68 perfdiffwd Profile diff of working directory changes
69 perfdirfoldmap
69 perfdirfoldmap
70 (no help text available)
70 (no help text available)
71 perfdirs (no help text available)
71 perfdirs (no help text available)
72 perfdirstate (no help text available)
72 perfdirstate (no help text available)
73 perfdirstatedirs
73 perfdirstatedirs
74 (no help text available)
74 (no help text available)
75 perfdirstatefoldmap
75 perfdirstatefoldmap
76 (no help text available)
76 (no help text available)
77 perfdirstatewrite
77 perfdirstatewrite
78 (no help text available)
78 (no help text available)
79 perffncacheencode
79 perffncacheencode
80 (no help text available)
80 (no help text available)
81 perffncacheload
81 perffncacheload
82 (no help text available)
82 (no help text available)
83 perffncachewrite
83 perffncachewrite
84 (no help text available)
84 (no help text available)
85 perfheads (no help text available)
85 perfheads (no help text available)
86 perfindex (no help text available)
86 perfindex (no help text available)
87 perflinelogedits
87 perflinelogedits
88 (no help text available)
88 (no help text available)
89 perfloadmarkers
89 perfloadmarkers
90 benchmark the time to parse the on-disk markers for a repo
90 benchmark the time to parse the on-disk markers for a repo
91 perflog (no help text available)
91 perflog (no help text available)
92 perflookup (no help text available)
92 perflookup (no help text available)
93 perflrucachedict
93 perflrucachedict
94 (no help text available)
94 (no help text available)
95 perfmanifest benchmark the time to read a manifest from disk and return a
95 perfmanifest benchmark the time to read a manifest from disk and return a
96 usable
96 usable
97 perfmergecalculate
97 perfmergecalculate
98 (no help text available)
98 (no help text available)
99 perfmoonwalk benchmark walking the changelog backwards
99 perfmoonwalk benchmark walking the changelog backwards
100 perfnodelookup
100 perfnodelookup
101 (no help text available)
101 (no help text available)
102 perfparents (no help text available)
102 perfparents (no help text available)
103 perfpathcopies
103 perfpathcopies
104 (no help text available)
104 (no help text available)
105 perfphases benchmark phasesets computation
105 perfphases benchmark phasesets computation
106 perfphasesremote
107 benchmark time needed to analyse phases of the remote server
106 perfrawfiles (no help text available)
108 perfrawfiles (no help text available)
107 perfrevlogchunks
109 perfrevlogchunks
108 Benchmark operations on revlog chunks.
110 Benchmark operations on revlog chunks.
109 perfrevlogindex
111 perfrevlogindex
110 Benchmark operations against a revlog index.
112 Benchmark operations against a revlog index.
111 perfrevlogrevision
113 perfrevlogrevision
112 Benchmark obtaining a revlog revision.
114 Benchmark obtaining a revlog revision.
113 perfrevlogrevisions
115 perfrevlogrevisions
114 Benchmark reading a series of revisions from a revlog.
116 Benchmark reading a series of revisions from a revlog.
115 perfrevrange (no help text available)
117 perfrevrange (no help text available)
116 perfrevset benchmark the execution time of a revset
118 perfrevset benchmark the execution time of a revset
117 perfstartup (no help text available)
119 perfstartup (no help text available)
118 perfstatus (no help text available)
120 perfstatus (no help text available)
119 perftags (no help text available)
121 perftags (no help text available)
120 perftemplating
122 perftemplating
121 test the rendering time of a given template
123 test the rendering time of a given template
122 perfunidiff benchmark a unified diff between revisions
124 perfunidiff benchmark a unified diff between revisions
123 perfvolatilesets
125 perfvolatilesets
124 benchmark the computation of various volatile set
126 benchmark the computation of various volatile set
125 perfwalk (no help text available)
127 perfwalk (no help text available)
126 perfwrite microbenchmark ui.write
128 perfwrite microbenchmark ui.write
127
129
128 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
130 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
129 $ hg perfaddremove
131 $ hg perfaddremove
130 $ hg perfancestors
132 $ hg perfancestors
131 $ hg perfancestorset 2
133 $ hg perfancestorset 2
132 $ hg perfannotate a
134 $ hg perfannotate a
133 $ hg perfbdiff -c 1
135 $ hg perfbdiff -c 1
134 $ hg perfbdiff --alldata 1
136 $ hg perfbdiff --alldata 1
135 $ hg perfunidiff -c 1
137 $ hg perfunidiff -c 1
136 $ hg perfunidiff --alldata 1
138 $ hg perfunidiff --alldata 1
137 $ hg perfbookmarks
139 $ hg perfbookmarks
138 $ hg perfbranchmap
140 $ hg perfbranchmap
139 $ hg perfcca
141 $ hg perfcca
140 $ hg perfchangegroupchangelog
142 $ hg perfchangegroupchangelog
141 $ hg perfchangeset 2
143 $ hg perfchangeset 2
142 $ hg perfctxfiles 2
144 $ hg perfctxfiles 2
143 $ hg perfdiffwd
145 $ hg perfdiffwd
144 $ hg perfdirfoldmap
146 $ hg perfdirfoldmap
145 $ hg perfdirs
147 $ hg perfdirs
146 $ hg perfdirstate
148 $ hg perfdirstate
147 $ hg perfdirstatedirs
149 $ hg perfdirstatedirs
148 $ hg perfdirstatefoldmap
150 $ hg perfdirstatefoldmap
149 $ hg perfdirstatewrite
151 $ hg perfdirstatewrite
150 #if repofncache
152 #if repofncache
151 $ hg perffncacheencode
153 $ hg perffncacheencode
152 $ hg perffncacheload
154 $ hg perffncacheload
153 $ hg debugrebuildfncache
155 $ hg debugrebuildfncache
154 fncache already up to date
156 fncache already up to date
155 $ hg perffncachewrite
157 $ hg perffncachewrite
156 $ hg debugrebuildfncache
158 $ hg debugrebuildfncache
157 fncache already up to date
159 fncache already up to date
158 #endif
160 #endif
159 $ hg perfheads
161 $ hg perfheads
160 $ hg perfindex
162 $ hg perfindex
161 $ hg perflinelogedits -n 1
163 $ hg perflinelogedits -n 1
162 $ hg perfloadmarkers
164 $ hg perfloadmarkers
163 $ hg perflog
165 $ hg perflog
164 $ hg perflookup 2
166 $ hg perflookup 2
165 $ hg perflrucache
167 $ hg perflrucache
166 $ hg perfmanifest 2
168 $ hg perfmanifest 2
167 $ hg perfmergecalculate -r 3
169 $ hg perfmergecalculate -r 3
168 $ hg perfmoonwalk
170 $ hg perfmoonwalk
169 $ hg perfnodelookup 2
171 $ hg perfnodelookup 2
170 $ hg perfpathcopies 1 2
172 $ hg perfpathcopies 1 2
171 $ hg perfrawfiles 2
173 $ hg perfrawfiles 2
172 $ hg perfrevlogindex -c
174 $ hg perfrevlogindex -c
173 #if reporevlogstore
175 #if reporevlogstore
174 $ hg perfrevlogrevisions .hg/store/data/a.i
176 $ hg perfrevlogrevisions .hg/store/data/a.i
175 #endif
177 #endif
176 $ hg perfrevlogrevision -m 0
178 $ hg perfrevlogrevision -m 0
177 $ hg perfrevlogchunks -c
179 $ hg perfrevlogchunks -c
178 $ hg perfrevrange
180 $ hg perfrevrange
179 $ hg perfrevset 'all()'
181 $ hg perfrevset 'all()'
180 $ hg perfstartup
182 $ hg perfstartup
181 $ hg perfstatus
183 $ hg perfstatus
182 $ hg perftags
184 $ hg perftags
183 $ hg perftemplating
185 $ hg perftemplating
184 $ hg perfvolatilesets
186 $ hg perfvolatilesets
185 $ hg perfwalk
187 $ hg perfwalk
186 $ hg perfparents
188 $ hg perfparents
187
189
188 test actual output
190 test actual output
189 ------------------
191 ------------------
190
192
191 normal output:
193 normal output:
192
194
193 $ hg perfheads --config perf.stub=no
195 $ hg perfheads --config perf.stub=no
194 ! wall * comb * user * sys * (best of *) (glob)
196 ! wall * comb * user * sys * (best of *) (glob)
195
197
196 detailed output:
198 detailed output:
197
199
198 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
200 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
199 ! wall * comb * user * sys * (best of *) (glob)
201 ! wall * comb * user * sys * (best of *) (glob)
200 ! wall * comb * user * sys * (max of *) (glob)
202 ! wall * comb * user * sys * (max of *) (glob)
201 ! wall * comb * user * sys * (avg of *) (glob)
203 ! wall * comb * user * sys * (avg of *) (glob)
202 ! wall * comb * user * sys * (median of *) (glob)
204 ! wall * comb * user * sys * (median of *) (glob)
203
205
204 Check perf.py for historical portability
206 Check perf.py for historical portability
205 ----------------------------------------
207 ----------------------------------------
206
208
207 $ cd "$TESTDIR/.."
209 $ cd "$TESTDIR/.."
208
210
209 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
211 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
210 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
212 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
211 > "$TESTDIR"/check-perf-code.py contrib/perf.py
213 > "$TESTDIR"/check-perf-code.py contrib/perf.py
212 contrib/perf.py:\d+: (re)
214 contrib/perf.py:\d+: (re)
213 > from mercurial import (
215 > from mercurial import (
214 import newer module separately in try clause for early Mercurial
216 import newer module separately in try clause for early Mercurial
217 contrib/perf.py:\d+: (re)
218 > from mercurial import (
219 import newer module separately in try clause for early Mercurial
215 [1]
220 [1]
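(a minimal sketch of the import pattern the checker asks for; "scmutil" is
only an example of a module that very old Mercurial may lack:

  try:
      from mercurial import scmutil
  except ImportError:
      scmutil = None

modules guarded this way degrade gracefully instead of breaking the loading
of perf.py on early versions)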