branchmap: encapsulate cache updating in the map itself...
Martijn Pieters - r41764:328ca3b9 default

# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide a range of Mercurial versions
#   as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf commands work correctly with as wide a range of
#   Mercurial versions as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf commands for historical features work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf commands for recent features work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
except ImportError:
    pass


def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
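# Note: the sentinel-based getattr() probe above distinguishes a missing
# attribute from one whose value happens to be falsy (None, 0, b''), and
# avoids the exception-swallowing behaviour of hasattr() on Python 2.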

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time
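# Note: time.perf_counter (Python 3.3+) is a monotonic high-resolution
# clock, so the first branch is always taken on Python 3; time.clock was
# removed in Python 3.8, and the b'nt' comparison only matches on Python 2,
# where str and bytes are the same type.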

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
        (b'c', b'changelog', False, (b'open changelog')),
        (b'm', b'manifest', False, (b'open manifest')),
        (b'', b'dir', False, (b'open directory manifest')),
        ]))
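# The nested getattr() fallbacks above probe the newer home of the option
# list first, then the older one, then a local default; schematically
# (illustrative names only):
#   opts = getattr(new_home, "name", getattr(old_home, "name", default))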

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator
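# Illustrative use of the decorator defined above (hypothetical command
# name, not part of this extension):
#
#   @command(b'perfexample', formatteropts)
#   def perfexample(ui, repo, **opts):
#       ...
#
# Whichever branch was taken, @command registers the decorated function in
# cmdtable under the given name and option list.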

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of the formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm

def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
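    # each appended entry is a (wallclock, user, sys) triple: util.timer()
    # deltas give wall time, while os.times() index 0/1 deltas give user
    # and system CPU time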

def _timer(fm, func, setup=None, title=None, displayall=False):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
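        # sampling policy: keep running until ~3 seconds and at least 100
        # iterations have passed; for slow calls, give up after ~10 seconds
        # once at least 3 runs have completed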
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)

def formatone(fm, timings, title=None, result=None, displayall=False):

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
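        # timings holds (wall, user, sys) triples sorted by wall time, so
        # 'best' is the fastest run, 'median' the middle one, and 'avg' a
        # per-field arithmetic mean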

# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has the 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking a future removal of the attribute,
    which would break the assumptions of the performance measurement.

    This function returns an object that can (1) assign a new value to
    the attribute and (2) restore the original value.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful for examining
    an attribute that isn't guaranteed to exist in all Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
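# Illustrative use of the returned accessor (this mirrors gettimer() above):
#
#   uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if uifout:
#       uifout.set(ui.ferr)    # redirect; uifout.restore() undoes it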

# utilities to examine internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # the correct way to clear the tags cache, because existing code
        # paths expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), so delattr() doesn't work in that case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but that isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)

def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
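# Deleting the attribute from the instance and popping the _filecache entry
# forces the corresponding filecache-backed property to be recomputed from
# disk on its next access instead of serving the memoized value.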

# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()
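# Note on the s()/d() pattern used by the commands below: _timer() invokes
# the setup callable (s) before every timed call of the benchmarked
# function (d), and only d() runs inside the timing context, so per-run
# cache clearing stays out of the measurements.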

@command(b'perftags', formatteropts+
        [
            (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        ])
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()

@command(b'perfbookmarks', formatteropts +
        [
            (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run
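    # every closure built here reopens and reparses the bundle on each run,
    # so file I/O is deliberately part of what gets measured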

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()

@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
          ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()
951
951
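# Example: analyse the phases of the default push target, or of an explicit
# peer (the URL below is a placeholder):
#
#   $ hg perfphasesremote
#   $ hg perfphasesremote https://example.com/repo
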
@command(b'perfmanifest', [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

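# Example: read the tip manifest by changeset revision, or address the
# manifest revlog directly by revision number (values are illustrative):
#
#   $ hg perfmanifest tip
#   $ hg perfmanifest -m 0 --clear-disk
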
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operations related to computing the ignored files"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()

@command(b'perfindex', [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not perform a revision lookup after creation'),
    ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of revisions looked up and their order can also
    matter.

    Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts[b'rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()

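# A few of the revision sets suggested in the docstring above, spelled out as
# concrete invocations (--rev may be repeated to combine sets):
#
#   $ hg perfindex --rev tip
#   $ hg perfindex --rev '-10000:' --rev 0
#   $ hg perfindex --no-lookup
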
@command(b'perfnodemap', [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revisions from a cold nodemap

    Depending on the implementation, the number and order of revisions we
    look up can vary. Examples of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookups. Benchmarking
    hex lookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass a reference to the nodemap from one closure to the
    # next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()

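# Example: time cold-nodemap lookups over the last ten thousand revisions
# (--rev is required; the set below is illustrative):
#
#   $ hg perfnodemap --rev '-10000:'
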
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()

@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
          ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

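# Example: replay 50000 synthetic edits with hunks capped at 20 lines
# (numbers are arbitrary; the fixed random seed keeps runs comparable):
#
#   $ hg perflinelogedits -n 50000 --max-hunk-lines 20
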
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
          ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()

@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
          ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort((b"perftemplating not available with this Mercurial"),
                          hint=b"use 4.3 or later")

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()

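# Example: render the default template over the whole repository, or time a
# custom (illustrative) template on the last thousand revisions, passed as a
# positional argument:
#
#   $ hg perftemplating
#   $ hg perftemplating -r '-1000:' '{rev}:{node|short} {desc|firstline}\n'
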
@command(b'perfhelper-pathcopies', formatteropts +
         [
          (b'r', b'revs', [], b'restrict search to these revisions'),
          (b'', b'timing', False, b'provides extra data (costly)'),
          ])
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perftracecopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However, it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d "
                  "%(nbrenamedfiles)12d %(time)18.5f\n")
        header_names = ("source", "destination", "nb-revs", "nb-files",
                        "nb-renames", "time")
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = ("%(source)12s %(destination)12s "
                  "%(nbrevs)12d %(nbmissingfiles)12d\n")
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()

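# Example: survey merges for costly copy-tracing pairs; --timing additionally
# runs one pathcopies() call per pair (slower, but reports rename counts):
#
#   $ hg perfhelper-pathcopies
#   $ hg perfhelper-pathcopies -r 'last(merge(), 100)' --timing
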
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()

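# A note on the worker protocol above: pairs are drained until a ``None``
# sentinel arrives; the sentinel itself is acknowledged with task_done() so
# that q.join() in perfbdiff (below) only returns once every pair *and*
# sentinel has been processed. Workers then park on the ``ready`` condition
# between timed runs, and exit once the ``done`` event is set.
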
def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)

@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()

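# Example: diff the tip changelog entry against its delta parent, then the
# same for 100 manifest revisions using four worker threads (values are
# illustrative):
#
#   $ hg perfbdiff -c tip
#   $ hg perfbdiff -m --count 100 --threads 4 tip
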
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

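# Example: a unified diff of one file revision against its delta parent (the
# path is a placeholder), or of everything touched by a changeset:
#
#   $ hg perfunidiff mercurial/commands.py tip
#   $ hg perfunidiff --alldata tip
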
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()

@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort((b'unsupported revlog version: %d') % version)

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

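# Example: run the index micro-benchmarks against the changelog, the manifest
# or a single filelog (the file path is a placeholder):
#
#   $ hg perfrevlogindex -c
#   $ hg perfrevlogindex mercurial/commands.py
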
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

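# Example: read every 100th changelog revision from 0 to tip, then walk the
# manifest backwards from tip (the distance shown is the default):
#
#   $ hg perfrevlogrevisions -c -d 100
#   $ hg perfrevlogrevisions -m --reverse
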
1804 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1804 @command(b'perfrevlogwrite', revlogopts + formatteropts +
1805 [(b's', b'startrev', 1000, b'revision to start writing at'),
1805 [(b's', b'startrev', 1000, b'revision to start writing at'),
1806 (b'', b'stoprev', -1, b'last revision to write'),
1806 (b'', b'stoprev', -1, b'last revision to write'),
1807 (b'', b'count', 3, b'last revision to write'),
1807 (b'', b'count', 3, b'last revision to write'),
1808 (b'', b'details', False, b'print timing for every revisions tested'),
1808 (b'', b'details', False, b'print timing for every revisions tested'),
1809 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1809 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
1810 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1810 (b'', b'lazydeltabase', True, b'try the provided delta first'),
1811 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1811 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1812 ],
1812 ],
1813 b'-c|-m|FILE')
1813 b'-c|-m|FILE')
1814 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1814 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
1815 """Benchmark writing a series of revisions to a revlog.
1815 """Benchmark writing a series of revisions to a revlog.
1816
1816
1817 Possible source values are:
1817 Possible source values are:
1818 * `full`: add from a full text (default).
1818 * `full`: add from a full text (default).
1819 * `parent-1`: add from a delta to the first parent
1819 * `parent-1`: add from a delta to the first parent
1820 * `parent-2`: add from a delta to the second parent if it exists
1820 * `parent-2`: add from a delta to the second parent if it exists
1821 (use a delta from the first parent otherwise)
1821 (use a delta from the first parent otherwise)
1822 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1822 * `parent-smallest`: add from the smallest delta (either p1 or p2)
1823 * `storage`: add from the existing precomputed deltas
1823 * `storage`: add from the existing precomputed deltas
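
For example, a hypothetical invocation replaying manifest revisions from
their smallest parent deltas::

  $ hg perfrevlogwrite -m --source parent-smallest --count 5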
1824 """
1824 """
1825 opts = _byteskwargs(opts)
1825 opts = _byteskwargs(opts)
1826
1826
1827 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1827 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
1828 rllen = getlen(ui)(rl)
1828 rllen = getlen(ui)(rl)
1829 if startrev < 0:
1829 if startrev < 0:
1830 startrev = rllen + startrev
1830 startrev = rllen + startrev
1831 if stoprev < 0:
1831 if stoprev < 0:
1832 stoprev = rllen + stoprev
1832 stoprev = rllen + stoprev
1833
1833
1834 lazydeltabase = opts['lazydeltabase']
1834 lazydeltabase = opts['lazydeltabase']
1835 source = opts['source']
1835 source = opts['source']
1836 clearcaches = opts['clear_caches']
1836 clearcaches = opts['clear_caches']
1837 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1837 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
1838 b'storage')
1838 b'storage')
1839 if source not in validsource:
1839 if source not in validsource:
1840 raise error.Abort('invalid source type: %s' % source)
1840 raise error.Abort('invalid source type: %s' % source)
1841
1841
1842 ### actually gather results
1842 ### actually gather results
1843 count = opts['count']
1843 count = opts['count']
1844 if count <= 0:
1844 if count <= 0:
1845 raise error.Abort('invalid run count: %d' % count)
1845 raise error.Abort('invalid run count: %d' % count)
1846 allresults = []
1846 allresults = []
1847 for c in range(count):
1847 for c in range(count):
1848 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1848 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
1849 lazydeltabase=lazydeltabase,
1849 lazydeltabase=lazydeltabase,
1850 clearcaches=clearcaches)
1850 clearcaches=clearcaches)
1851 allresults.append(timing)
1851 allresults.append(timing)
1852
1852
1853 ### consolidate the results in a single list
1853 ### consolidate the results in a single list
1854 results = []
1854 results = []
1855 for idx, (rev, t) in enumerate(allresults[0]):
1855 for idx, (rev, t) in enumerate(allresults[0]):
1856 ts = [t]
1856 ts = [t]
1857 for other in allresults[1:]:
1857 for other in allresults[1:]:
1858 orev, ot = other[idx]
1858 orev, ot = other[idx]
1859 assert orev == rev
1859 assert orev == rev
1860 ts.append(ot)
1860 ts.append(ot)
1861 results.append((rev, ts))
1861 results.append((rev, ts))
1862 resultcount = len(results)
1862 resultcount = len(results)
1863
1863
1864 ### Compute and display relevant statistics
1864 ### Compute and display relevant statistics
1865
1865
1866 # get a formatter
1866 # get a formatter
1867 fm = ui.formatter(b'perf', opts)
1867 fm = ui.formatter(b'perf', opts)
1868 displayall = ui.configbool(b"perf", b"all-timing", False)
1868 displayall = ui.configbool(b"perf", b"all-timing", False)
1869
1869
1870 # print individual details if requested
1870 # print individual details if requested
1871 if opts['details']:
1871 if opts['details']:
1872 for idx, item in enumerate(results, 1):
1872 for idx, item in enumerate(results, 1):
1873 rev, data = item
1873 rev, data = item
1874 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1874 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
1875 formatone(fm, data, title=title, displayall=displayall)
1875 formatone(fm, data, title=title, displayall=displayall)
1876
1876
1877 # sort results by median time
1877 # sort results by median time
1878 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1878 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
1879 # list of (name, index) to display
1879 # list of (name, index) to display
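# e.g. with 200 results, "90%" selects results[180], the 90th-percentile entry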
1880 relevants = [
1880 relevants = [
1881 ("min", 0),
1881 ("min", 0),
1882 ("10%", resultcount * 10 // 100),
1882 ("10%", resultcount * 10 // 100),
1883 ("25%", resultcount * 25 // 100),
1883 ("25%", resultcount * 25 // 100),
1884 ("50%", resultcount * 70 // 100),
1884 ("50%", resultcount * 70 // 100),
1885 ("75%", resultcount * 75 // 100),
1885 ("75%", resultcount * 75 // 100),
1886 ("90%", resultcount * 90 // 100),
1886 ("90%", resultcount * 90 // 100),
1887 ("95%", resultcount * 95 // 100),
1887 ("95%", resultcount * 95 // 100),
1888 ("99%", resultcount * 99 // 100),
1888 ("99%", resultcount * 99 // 100),
1889 ("99.9%", resultcount * 999 // 1000),
1889 ("99.9%", resultcount * 999 // 1000),
1890 ("99.99%", resultcount * 9999 // 10000),
1890 ("99.99%", resultcount * 9999 // 10000),
1891 ("99.999%", resultcount * 99999 // 100000),
1891 ("99.999%", resultcount * 99999 // 100000),
1892 ("max", -1),
1892 ("max", -1),
1893 ]
1893 ]
1894 if not ui.quiet:
1894 if not ui.quiet:
1895 for name, idx in relevants:
1895 for name, idx in relevants:
1896 data = results[idx]
1896 data = results[idx]
1897 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1897 title = '%s of %d, rev %d' % (name, resultcount, data[0])
1898 formatone(fm, data[1], title=title, displayall=displayall)
1898 formatone(fm, data[1], title=title, displayall=displayall)
1899
1899
1900 # XXX summing that many floats will not be very precise; we ignore this
1900 # XXX summing that many floats will not be very precise; we ignore this
1901 # fact for now
1901 # fact for now
1902 totaltime = []
1902 totaltime = []
1903 for item in allresults:
1903 for item in allresults:
1904 totaltime.append((sum(x[1][0] for x in item),
1904 totaltime.append((sum(x[1][0] for x in item),
1905 sum(x[1][1] for x in item),
1905 sum(x[1][1] for x in item),
1906 sum(x[1][2] for x in item),)
1906 sum(x[1][2] for x in item),)
1907 )
1907 )
1908 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1908 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
1909 displayall=displayall)
1909 displayall=displayall)
1910 fm.end()
1910 fm.end()
1911
1911
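# Stub transaction passed to addrawrevision(): the benchmarked write path
# only needs tr.add() to exist, so a no-op is sufficient here.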
1912 class _faketr(object):
1912 class _faketr(object):
1913 def add(s, x, y, z=None):
1913 def add(s, x, y, z=None):
1914 return None
1914 return None
1915
1915
1916 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1916 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
1917 lazydeltabase=True, clearcaches=True):
1917 lazydeltabase=True, clearcaches=True):
1918 timings = []
1918 timings = []
1919 tr = _faketr()
1919 tr = _faketr()
1920 with _temprevlog(ui, orig, startrev) as dest:
1920 with _temprevlog(ui, orig, startrev) as dest:
1921 dest._lazydeltabase = lazydeltabase
1921 dest._lazydeltabase = lazydeltabase
1922 revs = list(orig.revs(startrev, stoprev))
1922 revs = list(orig.revs(startrev, stoprev))
1923 total = len(revs)
1923 total = len(revs)
1924 topic = 'adding'
1924 topic = 'adding'
1925 if runidx is not None:
1925 if runidx is not None:
1926 topic += ' (run #%d)' % runidx
1926 topic += ' (run #%d)' % runidx
1927 # Support both old and new progress API
1927 # Support both old and new progress API
1928 if util.safehasattr(ui, 'makeprogress'):
1928 if util.safehasattr(ui, 'makeprogress'):
1929 progress = ui.makeprogress(topic, unit='revs', total=total)
1929 progress = ui.makeprogress(topic, unit='revs', total=total)
1930 def updateprogress(pos):
1930 def updateprogress(pos):
1931 progress.update(pos)
1931 progress.update(pos)
1932 def completeprogress():
1932 def completeprogress():
1933 progress.complete()
1933 progress.complete()
1934 else:
1934 else:
1935 def updateprogress(pos):
1935 def updateprogress(pos):
1936 ui.progress(topic, pos, unit='revs', total=total)
1936 ui.progress(topic, pos, unit='revs', total=total)
1937 def completeprogress():
1937 def completeprogress():
1938 ui.progress(topic, None, unit='revs', total=total)
1938 ui.progress(topic, None, unit='revs', total=total)
1939
1939
1940 for idx, rev in enumerate(revs):
1940 for idx, rev in enumerate(revs):
1941 updateprogress(idx)
1941 updateprogress(idx)
1942 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1942 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
1943 if clearcaches:
1943 if clearcaches:
1944 dest.index.clearcaches()
1944 dest.index.clearcaches()
1945 dest.clearcaches()
1945 dest.clearcaches()
1946 with timeone() as r:
1946 with timeone() as r:
1947 dest.addrawrevision(*addargs, **addkwargs)
1947 dest.addrawrevision(*addargs, **addkwargs)
1948 timings.append((rev, r[0]))
1948 timings.append((rev, r[0]))
1949 updateprogress(total)
1949 updateprogress(total)
1950 completeprogress()
1950 completeprogress()
1951 return timings
1951 return timings
1952
1952
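# Build the positional and keyword arguments that dest.addrawrevision() will
# receive for one revision, according to the requested --source mode: either
# a fulltext, or a (baserev, delta) pair passed as `cachedelta`.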
1953 def _getrevisionseed(orig, rev, tr, source):
1953 def _getrevisionseed(orig, rev, tr, source):
1954 from mercurial.node import nullid
1954 from mercurial.node import nullid
1955
1955
1956 linkrev = orig.linkrev(rev)
1956 linkrev = orig.linkrev(rev)
1957 node = orig.node(rev)
1957 node = orig.node(rev)
1958 p1, p2 = orig.parents(node)
1958 p1, p2 = orig.parents(node)
1959 flags = orig.flags(rev)
1959 flags = orig.flags(rev)
1960 cachedelta = None
1960 cachedelta = None
1961 text = None
1961 text = None
1962
1962
1963 if source == b'full':
1963 if source == b'full':
1964 text = orig.revision(rev)
1964 text = orig.revision(rev)
1965 elif source == b'parent-1':
1965 elif source == b'parent-1':
1966 baserev = orig.rev(p1)
1966 baserev = orig.rev(p1)
1967 cachedelta = (baserev, orig.revdiff(p1, rev))
1967 cachedelta = (baserev, orig.revdiff(p1, rev))
1968 elif source == b'parent-2':
1968 elif source == b'parent-2':
1969 parent = p2
1969 parent = p2
1970 if p2 == nullid:
1970 if p2 == nullid:
1971 parent = p1
1971 parent = p1
1972 baserev = orig.rev(parent)
1972 baserev = orig.rev(parent)
1973 cachedelta = (baserev, orig.revdiff(parent, rev))
1973 cachedelta = (baserev, orig.revdiff(parent, rev))
1974 elif source == b'parent-smallest':
1974 elif source == b'parent-smallest':
1975 p1diff = orig.revdiff(p1, rev)
1975 p1diff = orig.revdiff(p1, rev)
1976 parent = p1
1976 parent = p1
1977 diff = p1diff
1977 diff = p1diff
1978 if p2 != nullid:
1978 if p2 != nullid:
1979 p2diff = orig.revdiff(p2, rev)
1979 p2diff = orig.revdiff(p2, rev)
1980 if len(p1diff) > len(p2diff):
1980 if len(p1diff) > len(p2diff):
1981 parent = p2
1981 parent = p2
1982 diff = p2diff
1982 diff = p2diff
1983 baserev = orig.rev(parent)
1983 baserev = orig.rev(parent)
1984 cachedelta = (baserev, diff)
1984 cachedelta = (baserev, diff)
1985 elif source == b'storage':
1985 elif source == b'storage':
1986 baserev = orig.deltaparent(rev)
1986 baserev = orig.deltaparent(rev)
1987 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1987 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
1988
1988
1989 return ((text, tr, linkrev, p1, p2),
1989 return ((text, tr, linkrev, p1, p2),
1990 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1990 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
1991
1991
1992 @contextlib.contextmanager
1992 @contextlib.contextmanager
1993 def _temprevlog(ui, orig, truncaterev):
1993 def _temprevlog(ui, orig, truncaterev):
1994 from mercurial import vfs as vfsmod
1994 from mercurial import vfs as vfsmod
1995
1995
1996 if orig._inline:
1996 if orig._inline:
1997 raise error.Abort('inline revlogs are not supported (yet)')
1997 raise error.Abort('inline revlogs are not supported (yet)')
1998
1998
1999 origindexpath = orig.opener.join(orig.indexfile)
1999 origindexpath = orig.opener.join(orig.indexfile)
2000 origdatapath = orig.opener.join(orig.datafile)
2000 origdatapath = orig.opener.join(orig.datafile)
2001 indexname = 'revlog.i'
2001 indexname = 'revlog.i'
2002 dataname = 'revlog.d'
2002 dataname = 'revlog.d'
2003
2003
2004 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2004 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2005 try:
2005 try:
2006 # copy the data file in a temporary directory
2006 # copy the data file in a temporary directory
2007 ui.debug('copying data in %s\n' % tmpdir)
2007 ui.debug('copying data in %s\n' % tmpdir)
2008 destindexpath = os.path.join(tmpdir, 'revlog.i')
2008 destindexpath = os.path.join(tmpdir, 'revlog.i')
2009 destdatapath = os.path.join(tmpdir, 'revlog.d')
2009 destdatapath = os.path.join(tmpdir, 'revlog.d')
2010 shutil.copyfile(origindexpath, destindexpath)
2010 shutil.copyfile(origindexpath, destindexpath)
2011 shutil.copyfile(origdatapath, destdatapath)
2011 shutil.copyfile(origdatapath, destdatapath)
2012
2012
2013 # remove the data we want to add again
2013 # remove the data we want to add again
2014 ui.debug('truncating data to be rewritten\n')
2014 ui.debug('truncating data to be rewritten\n')
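# index entries have a fixed size (orig._io.size bytes), so truncating the
# index at truncaterev * entry-size drops every revision >= truncaterev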
2015 with open(destindexpath, 'ab') as index:
2015 with open(destindexpath, 'ab') as index:
2016 index.seek(0)
2016 index.seek(0)
2017 index.truncate(truncaterev * orig._io.size)
2017 index.truncate(truncaterev * orig._io.size)
2018 with open(destdatapath, 'ab') as data:
2018 with open(destdatapath, 'ab') as data:
2019 data.seek(0)
2019 data.seek(0)
2020 data.truncate(orig.start(truncaterev))
2020 data.truncate(orig.start(truncaterev))
2021
2021
2022 # instantiate a new revlog from the temporary copy
2022 # instantiate a new revlog from the temporary copy
2023 ui.debug('instantiating revlog from the truncated copy\n')
2023 ui.debug('instantiating revlog from the truncated copy\n')
2024 vfs = vfsmod.vfs(tmpdir)
2024 vfs = vfsmod.vfs(tmpdir)
2025 vfs.options = getattr(orig.opener, 'options', None)
2025 vfs.options = getattr(orig.opener, 'options', None)
2026
2026
2027 dest = revlog.revlog(vfs,
2027 dest = revlog.revlog(vfs,
2028 indexfile=indexname,
2028 indexfile=indexname,
2029 datafile=dataname)
2029 datafile=dataname)
2030 if dest._inline:
2030 if dest._inline:
2031 raise error.Abort('inline revlogs are not supported (yet)')
2031 raise error.Abort('inline revlogs are not supported (yet)')
2032 # make sure internals are initialized
2032 # make sure internals are initialized
2033 dest.revision(len(dest) - 1)
2033 dest.revision(len(dest) - 1)
2034 yield dest
2034 yield dest
2035 del dest, vfs
2035 del dest, vfs
2036 finally:
2036 finally:
2037 shutil.rmtree(tmpdir, True)
2037 shutil.rmtree(tmpdir, True)
2038
2038
2039 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2039 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2040 [(b'e', b'engines', b'', b'compression engines to use'),
2040 [(b'e', b'engines', b'', b'compression engines to use'),
2041 (b's', b'startrev', 0, b'revision to start at')],
2041 (b's', b'startrev', 0, b'revision to start at')],
2042 b'-c|-m|FILE')
2042 b'-c|-m|FILE')
2043 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2043 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2044 """Benchmark operations on revlog chunks.
2044 """Benchmark operations on revlog chunks.
2045
2045
2046 Logically, each revlog is a collection of fulltext revisions. However,
2046 Logically, each revlog is a collection of fulltext revisions. However,
2047 stored within each revlog are "chunks" of possibly compressed data. This
2047 stored within each revlog are "chunks" of possibly compressed data. This
2048 data needs to be read and decompressed or compressed and written.
2048 data needs to be read and decompressed or compressed and written.
2049
2049
2050 This command measures the time it takes to read+decompress and recompress
2050 This command measures the time it takes to read+decompress and recompress
2051 chunks in a revlog. It effectively isolates I/O and compression performance.
2051 chunks in a revlog. It effectively isolates I/O and compression performance.
2052 For measurements of higher-level operations like resolving revisions,
2052 For measurements of higher-level operations like resolving revisions,
2053 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2053 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
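
For example, a hypothetical invocation comparing compression engines on the
changelog (zstd only if this build provides it)::

  $ hg perfrevlogchunks -c --engines zlib,zstd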
2054 """
2054 """
2055 opts = _byteskwargs(opts)
2055 opts = _byteskwargs(opts)
2056
2056
2057 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2057 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2058
2058
2059 # _chunkraw was renamed to _getsegmentforrevs.
2059 # _chunkraw was renamed to _getsegmentforrevs.
2060 try:
2060 try:
2061 segmentforrevs = rl._getsegmentforrevs
2061 segmentforrevs = rl._getsegmentforrevs
2062 except AttributeError:
2062 except AttributeError:
2063 segmentforrevs = rl._chunkraw
2063 segmentforrevs = rl._chunkraw
2064
2064
2065 # Verify engines argument.
2065 # Verify engines argument.
2066 if engines:
2066 if engines:
2067 engines = set(e.strip() for e in engines.split(b','))
2067 engines = set(e.strip() for e in engines.split(b','))
2068 for engine in engines:
2068 for engine in engines:
2069 try:
2069 try:
2070 util.compressionengines[engine]
2070 util.compressionengines[engine]
2071 except KeyError:
2071 except KeyError:
2072 raise error.Abort(b'unknown compression engine: %s' % engine)
2072 raise error.Abort(b'unknown compression engine: %s' % engine)
2073 else:
2073 else:
2074 engines = []
2074 engines = []
2075 for e in util.compengines:
2075 for e in util.compengines:
2076 engine = util.compengines[e]
2076 engine = util.compengines[e]
2077 try:
2077 try:
2078 if engine.available():
2078 if engine.available():
2079 engine.revlogcompressor().compress(b'dummy')
2079 engine.revlogcompressor().compress(b'dummy')
2080 engines.append(e)
2080 engines.append(e)
2081 except NotImplementedError:
2081 except NotImplementedError:
2082 pass
2082 pass
2083
2083
2084 revs = list(rl.revs(startrev, len(rl) - 1))
2084 revs = list(rl.revs(startrev, len(rl) - 1))
2085
2085
2086 def rlfh(rl):
2086 def rlfh(rl):
2087 if rl._inline:
2087 if rl._inline:
2088 return getsvfs(repo)(rl.indexfile)
2088 return getsvfs(repo)(rl.indexfile)
2089 else:
2089 else:
2090 return getsvfs(repo)(rl.datafile)
2090 return getsvfs(repo)(rl.datafile)
2091
2091
2092 def doread():
2092 def doread():
2093 rl.clearcaches()
2093 rl.clearcaches()
2094 for rev in revs:
2094 for rev in revs:
2095 segmentforrevs(rev, rev)
2095 segmentforrevs(rev, rev)
2096
2096
2097 def doreadcachedfh():
2097 def doreadcachedfh():
2098 rl.clearcaches()
2098 rl.clearcaches()
2099 fh = rlfh(rl)
2099 fh = rlfh(rl)
2100 for rev in revs:
2100 for rev in revs:
2101 segmentforrevs(rev, rev, df=fh)
2101 segmentforrevs(rev, rev, df=fh)
2102
2102
2103 def doreadbatch():
2103 def doreadbatch():
2104 rl.clearcaches()
2104 rl.clearcaches()
2105 segmentforrevs(revs[0], revs[-1])
2105 segmentforrevs(revs[0], revs[-1])
2106
2106
2107 def doreadbatchcachedfh():
2107 def doreadbatchcachedfh():
2108 rl.clearcaches()
2108 rl.clearcaches()
2109 fh = rlfh(rl)
2109 fh = rlfh(rl)
2110 segmentforrevs(revs[0], revs[-1], df=fh)
2110 segmentforrevs(revs[0], revs[-1], df=fh)
2111
2111
2112 def dochunk():
2112 def dochunk():
2113 rl.clearcaches()
2113 rl.clearcaches()
2114 fh = rlfh(rl)
2114 fh = rlfh(rl)
2115 for rev in revs:
2115 for rev in revs:
2116 rl._chunk(rev, df=fh)
2116 rl._chunk(rev, df=fh)
2117
2117
2118 chunks = [None]
2118 chunks = [None]
2119
2119
2120 def dochunkbatch():
2120 def dochunkbatch():
2121 rl.clearcaches()
2121 rl.clearcaches()
2122 fh = rlfh(rl)
2122 fh = rlfh(rl)
2123 # Save chunks as a side-effect.
2123 # Save chunks as a side-effect.
2124 chunks[0] = rl._chunks(revs, df=fh)
2124 chunks[0] = rl._chunks(revs, df=fh)
2125
2125
2126 def docompress(compressor):
2126 def docompress(compressor):
2127 rl.clearcaches()
2127 rl.clearcaches()
2128
2128
2129 try:
2129 try:
2130 # Swap in the requested compression engine.
2130 # Swap in the requested compression engine.
2131 oldcompressor = rl._compressor
2131 oldcompressor = rl._compressor
2132 rl._compressor = compressor
2132 rl._compressor = compressor
2133 for chunk in chunks[0]:
2133 for chunk in chunks[0]:
2134 rl.compress(chunk)
2134 rl.compress(chunk)
2135 finally:
2135 finally:
2136 rl._compressor = oldcompressor
2136 rl._compressor = oldcompressor
2137
2137
2138 benches = [
2138 benches = [
2139 (lambda: doread(), b'read'),
2139 (lambda: doread(), b'read'),
2140 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2140 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2141 (lambda: doreadbatch(), b'read batch'),
2141 (lambda: doreadbatch(), b'read batch'),
2142 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2142 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2143 (lambda: dochunk(), b'chunk'),
2143 (lambda: dochunk(), b'chunk'),
2144 (lambda: dochunkbatch(), b'chunk batch'),
2144 (lambda: dochunkbatch(), b'chunk batch'),
2145 ]
2145 ]
2146
2146
2147 for engine in sorted(engines):
2147 for engine in sorted(engines):
2148 compressor = util.compengines[engine].revlogcompressor()
2148 compressor = util.compengines[engine].revlogcompressor()
2149 benches.append((functools.partial(docompress, compressor),
2149 benches.append((functools.partial(docompress, compressor),
2150 b'compress w/ %s' % engine))
2150 b'compress w/ %s' % engine))
2151
2151
2152 for fn, title in benches:
2152 for fn, title in benches:
2153 timer, fm = gettimer(ui, opts)
2153 timer, fm = gettimer(ui, opts)
2154 timer(fn, title=title)
2154 timer(fn, title=title)
2155 fm.end()
2155 fm.end()
2156
2156
2157 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2157 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2158 [(b'', b'cache', False, b'use caches instead of clearing')],
2158 [(b'', b'cache', False, b'use caches instead of clearing')],
2159 b'-c|-m|FILE REV')
2159 b'-c|-m|FILE REV')
2160 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2160 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2161 """Benchmark obtaining a revlog revision.
2161 """Benchmark obtaining a revlog revision.
2162
2162
2163 Obtaining a revlog revision consists of roughly the following steps:
2163 Obtaining a revlog revision consists of roughly the following steps:
2164
2164
2165 1. Compute the delta chain
2165 1. Compute the delta chain
2166 2. Slice the delta chain if applicable
2166 2. Slice the delta chain if applicable
2167 3. Obtain the raw chunks for that delta chain
2167 3. Obtain the raw chunks for that delta chain
2168 4. Decompress each raw chunk
2168 4. Decompress each raw chunk
2169 5. Apply binary patches to obtain fulltext
2169 5. Apply binary patches to obtain fulltext
2170 6. Verify hash of fulltext
2170 6. Verify hash of fulltext
2171
2171
2172 This command measures the time spent in each of these phases.
2172 This command measures the time spent in each of these phases.
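
For example, a hypothetical invocation profiling the manifest at tip::

  $ hg perfrevlogrevision -m tip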
2173 """
2173 """
2174 opts = _byteskwargs(opts)
2174 opts = _byteskwargs(opts)
2175
2175
2176 if opts.get(b'changelog') or opts.get(b'manifest'):
2176 if opts.get(b'changelog') or opts.get(b'manifest'):
2177 file_, rev = None, file_
2177 file_, rev = None, file_
2178 elif rev is None:
2178 elif rev is None:
2179 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2179 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2180
2180
2181 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2181 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2182
2182
2183 # _chunkraw was renamed to _getsegmentforrevs.
2183 # _chunkraw was renamed to _getsegmentforrevs.
2184 try:
2184 try:
2185 segmentforrevs = r._getsegmentforrevs
2185 segmentforrevs = r._getsegmentforrevs
2186 except AttributeError:
2186 except AttributeError:
2187 segmentforrevs = r._chunkraw
2187 segmentforrevs = r._chunkraw
2188
2188
2189 node = r.lookup(rev)
2189 node = r.lookup(rev)
2190 rev = r.rev(node)
2190 rev = r.rev(node)
2191
2191
2192 def getrawchunks(data, chain):
2192 def getrawchunks(data, chain):
2193 start = r.start
2193 start = r.start
2194 length = r.length
2194 length = r.length
2195 inline = r._inline
2195 inline = r._inline
2196 iosize = r._io.size
2196 iosize = r._io.size
2197 buffer = util.buffer
2197 buffer = util.buffer
2198
2198
2199 chunks = []
2199 chunks = []
2200 ladd = chunks.append
2200 ladd = chunks.append
2201 for idx, item in enumerate(chain):
2201 for idx, item in enumerate(chain):
2202 offset = start(item[0])
2202 offset = start(item[0])
2203 bits = data[idx]
2203 bits = data[idx]
2204 for rev in item:
2204 for rev in item:
2205 chunkstart = start(rev)
2205 chunkstart = start(rev)
2206 if inline:
2206 if inline:
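# inline revlogs interleave an index entry before each data chunk, so
# skip the (rev + 1) index records that precede this chunk's data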
2207 chunkstart += (rev + 1) * iosize
2207 chunkstart += (rev + 1) * iosize
2208 chunklength = length(rev)
2208 chunklength = length(rev)
2209 ladd(buffer(bits, chunkstart - offset, chunklength))
2209 ladd(buffer(bits, chunkstart - offset, chunklength))
2210
2210
2211 return chunks
2211 return chunks
2212
2212
2213 def dodeltachain(rev):
2213 def dodeltachain(rev):
2214 if not cache:
2214 if not cache:
2215 r.clearcaches()
2215 r.clearcaches()
2216 r._deltachain(rev)
2216 r._deltachain(rev)
2217
2217
2218 def doread(chain):
2218 def doread(chain):
2219 if not cache:
2219 if not cache:
2220 r.clearcaches()
2220 r.clearcaches()
2221 for item in slicedchain:
2221 for item in slicedchain:
2222 segmentforrevs(item[0], item[-1])
2222 segmentforrevs(item[0], item[-1])
2223
2223
2224 def doslice(r, chain, size):
2224 def doslice(r, chain, size):
2225 for s in slicechunk(r, chain, targetsize=size):
2225 for s in slicechunk(r, chain, targetsize=size):
2226 pass
2226 pass
2227
2227
2228 def dorawchunks(data, chain):
2228 def dorawchunks(data, chain):
2229 if not cache:
2229 if not cache:
2230 r.clearcaches()
2230 r.clearcaches()
2231 getrawchunks(data, chain)
2231 getrawchunks(data, chain)
2232
2232
2233 def dodecompress(chunks):
2233 def dodecompress(chunks):
2234 decomp = r.decompress
2234 decomp = r.decompress
2235 for chunk in chunks:
2235 for chunk in chunks:
2236 decomp(chunk)
2236 decomp(chunk)
2237
2237
2238 def dopatch(text, bins):
2238 def dopatch(text, bins):
2239 if not cache:
2239 if not cache:
2240 r.clearcaches()
2240 r.clearcaches()
2241 mdiff.patches(text, bins)
2241 mdiff.patches(text, bins)
2242
2242
2243 def dohash(text):
2243 def dohash(text):
2244 if not cache:
2244 if not cache:
2245 r.clearcaches()
2245 r.clearcaches()
2246 r.checkhash(text, node, rev=rev)
2246 r.checkhash(text, node, rev=rev)
2247
2247
2248 def dorevision():
2248 def dorevision():
2249 if not cache:
2249 if not cache:
2250 r.clearcaches()
2250 r.clearcaches()
2251 r.revision(node)
2251 r.revision(node)
2252
2252
2253 try:
2253 try:
2254 from mercurial.revlogutils.deltas import slicechunk
2254 from mercurial.revlogutils.deltas import slicechunk
2255 except ImportError:
2255 except ImportError:
2256 slicechunk = getattr(revlog, '_slicechunk', None)
2256 slicechunk = getattr(revlog, '_slicechunk', None)
2257
2257
2258 size = r.length(rev)
2258 size = r.length(rev)
2259 chain = r._deltachain(rev)[0]
2259 chain = r._deltachain(rev)[0]
2260 if not getattr(r, '_withsparseread', False):
2260 if not getattr(r, '_withsparseread', False):
2261 slicedchain = (chain,)
2261 slicedchain = (chain,)
2262 else:
2262 else:
2263 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2263 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2264 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2264 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2265 rawchunks = getrawchunks(data, slicedchain)
2265 rawchunks = getrawchunks(data, slicedchain)
2266 bins = r._chunks(chain)
2266 bins = r._chunks(chain)
2267 text = bytes(bins[0])
2267 text = bytes(bins[0])
2268 bins = bins[1:]
2268 bins = bins[1:]
2269 text = mdiff.patches(text, bins)
2269 text = mdiff.patches(text, bins)
2270
2270
2271 benches = [
2271 benches = [
2272 (lambda: dorevision(), b'full'),
2272 (lambda: dorevision(), b'full'),
2273 (lambda: dodeltachain(rev), b'deltachain'),
2273 (lambda: dodeltachain(rev), b'deltachain'),
2274 (lambda: doread(chain), b'read'),
2274 (lambda: doread(chain), b'read'),
2275 ]
2275 ]
2276
2276
2277 if getattr(r, '_withsparseread', False):
2277 if getattr(r, '_withsparseread', False):
2278 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2278 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2279 benches.append(slicing)
2279 benches.append(slicing)
2280
2280
2281 benches.extend([
2281 benches.extend([
2282 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2282 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2283 (lambda: dodecompress(rawchunks), b'decompress'),
2283 (lambda: dodecompress(rawchunks), b'decompress'),
2284 (lambda: dopatch(text, bins), b'patch'),
2284 (lambda: dopatch(text, bins), b'patch'),
2285 (lambda: dohash(text), b'hash'),
2285 (lambda: dohash(text), b'hash'),
2286 ])
2286 ])
2287
2287
2288 timer, fm = gettimer(ui, opts)
2288 timer, fm = gettimer(ui, opts)
2289 for fn, title in benches:
2289 for fn, title in benches:
2290 timer(fn, title=title)
2290 timer(fn, title=title)
2291 fm.end()
2291 fm.end()
2292
2292
2293 @command(b'perfrevset',
2293 @command(b'perfrevset',
2294 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2294 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2295 (b'', b'contexts', False, b'obtain changectx for each revision')]
2295 (b'', b'contexts', False, b'obtain changectx for each revision')]
2296 + formatteropts, b"REVSET")
2296 + formatteropts, b"REVSET")
2297 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2297 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2298 """benchmark the execution time of a revset
2298 """benchmark the execution time of a revset
2299
2299
2300 Use the --clear option to evaluate the impact of rebuilding the volatile
2300 Use the --clear option to evaluate the impact of rebuilding the volatile
2301 revision set caches on revset execution. The volatile caches hold
2301 revision set caches on revset execution. The volatile caches hold
2302 filtering- and obsolescence-related data."""
2302 filtering- and obsolescence-related data."""
2303 opts = _byteskwargs(opts)
2303 opts = _byteskwargs(opts)
2304
2304
2305 timer, fm = gettimer(ui, opts)
2305 timer, fm = gettimer(ui, opts)
2306 def d():
2306 def d():
2307 if clear:
2307 if clear:
2308 repo.invalidatevolatilesets()
2308 repo.invalidatevolatilesets()
2309 if contexts:
2309 if contexts:
2310 for ctx in repo.set(expr): pass
2310 for ctx in repo.set(expr): pass
2311 else:
2311 else:
2312 for r in repo.revs(expr): pass
2312 for r in repo.revs(expr): pass
2313 timer(d)
2313 timer(d)
2314 fm.end()
2314 fm.end()
2315
2315
2316 @command(b'perfvolatilesets',
2316 @command(b'perfvolatilesets',
2317 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2317 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2318 ] + formatteropts)
2318 ] + formatteropts)
2319 def perfvolatilesets(ui, repo, *names, **opts):
2319 def perfvolatilesets(ui, repo, *names, **opts):
2320 """benchmark the computation of various volatile set
2320 """benchmark the computation of various volatile set
2321
2321
2322 Volatile sets compute elements related to filtering and obsolescence."""
2322 Volatile sets compute elements related to filtering and obsolescence."""
2323 opts = _byteskwargs(opts)
2323 opts = _byteskwargs(opts)
2324 timer, fm = gettimer(ui, opts)
2324 timer, fm = gettimer(ui, opts)
2325 repo = repo.unfiltered()
2325 repo = repo.unfiltered()
2326
2326
2327 def getobs(name):
2327 def getobs(name):
2328 def d():
2328 def d():
2329 repo.invalidatevolatilesets()
2329 repo.invalidatevolatilesets()
2330 if opts[b'clear_obsstore']:
2330 if opts[b'clear_obsstore']:
2331 clearfilecache(repo, b'obsstore')
2331 clearfilecache(repo, b'obsstore')
2332 obsolete.getrevs(repo, name)
2332 obsolete.getrevs(repo, name)
2333 return d
2333 return d
2334
2334
2335 allobs = sorted(obsolete.cachefuncs)
2335 allobs = sorted(obsolete.cachefuncs)
2336 if names:
2336 if names:
2337 allobs = [n for n in allobs if n in names]
2337 allobs = [n for n in allobs if n in names]
2338
2338
2339 for name in allobs:
2339 for name in allobs:
2340 timer(getobs(name), title=name)
2340 timer(getobs(name), title=name)
2341
2341
2342 def getfiltered(name):
2342 def getfiltered(name):
2343 def d():
2343 def d():
2344 repo.invalidatevolatilesets()
2344 repo.invalidatevolatilesets()
2345 if opts[b'clear_obsstore']:
2345 if opts[b'clear_obsstore']:
2346 clearfilecache(repo, b'obsstore')
2346 clearfilecache(repo, b'obsstore')
2347 repoview.filterrevs(repo, name)
2347 repoview.filterrevs(repo, name)
2348 return d
2348 return d
2349
2349
2350 allfilter = sorted(repoview.filtertable)
2350 allfilter = sorted(repoview.filtertable)
2351 if names:
2351 if names:
2352 allfilter = [n for n in allfilter if n in names]
2352 allfilter = [n for n in allfilter if n in names]
2353
2353
2354 for name in allfilter:
2354 for name in allfilter:
2355 timer(getfiltered(name), title=name)
2355 timer(getfiltered(name), title=name)
2356 fm.end()
2356 fm.end()
2357
2357
2358 @command(b'perfbranchmap',
2358 @command(b'perfbranchmap',
2359 [(b'f', b'full', False,
2359 [(b'f', b'full', False,
2360 b'include the build time of subsets'),
2360 b'include the build time of subsets'),
2361 (b'', b'clear-revbranch', False,
2361 (b'', b'clear-revbranch', False,
2362 b'purge the revbranch cache between computation'),
2362 b'purge the revbranch cache between computation'),
2363 ] + formatteropts)
2363 ] + formatteropts)
2364 def perfbranchmap(ui, repo, *filternames, **opts):
2364 def perfbranchmap(ui, repo, *filternames, **opts):
2365 """benchmark the update of a branchmap
2365 """benchmark the update of a branchmap
2366
2366
2367 This benchmarks the full repo.branchmap() call with on-disk cache reads
2367 This benchmarks the full repo.branchmap() call with on-disk cache reads
and writes disabled.
2368 """
2368 """
2369 opts = _byteskwargs(opts)
2369 opts = _byteskwargs(opts)
2370 full = opts.get(b"full", False)
2370 full = opts.get(b"full", False)
2371 clear_revbranch = opts.get(b"clear_revbranch", False)
2371 clear_revbranch = opts.get(b"clear_revbranch", False)
2372 timer, fm = gettimer(ui, opts)
2372 timer, fm = gettimer(ui, opts)
2373 def getbranchmap(filtername):
2373 def getbranchmap(filtername):
2374 """generate a benchmark function for the filtername"""
2374 """generate a benchmark function for the filtername"""
2375 if filtername is None:
2375 if filtername is None:
2376 view = repo
2376 view = repo
2377 else:
2377 else:
2378 view = repo.filtered(filtername)
2378 view = repo.filtered(filtername)
2379 if util.safehasattr(view._branchcaches, '_per_filter'):
2380 filtered = view._branchcaches._per_filter
2381 else:
2382 # older versions
2383 filtered = view._branchcaches
2379 def d():
2384 def d():
2380 if clear_revbranch:
2385 if clear_revbranch:
2381 repo.revbranchcache()._clear()
2386 repo.revbranchcache()._clear()
2382 if full:
2387 if full:
2383 view._branchcaches.clear()
2388 view._branchcaches.clear()
2384 else:
2389 else:
2385 view._branchcaches.pop(filtername, None)
2390 filtered.pop(filtername, None)
2386 view.branchmap()
2391 view.branchmap()
2387 return d
2392 return d
2388 # add filter in smaller subset to bigger subset
2393 # add filter in smaller subset to bigger subset
2389 possiblefilters = set(repoview.filtertable)
2394 possiblefilters = set(repoview.filtertable)
2390 if filternames:
2395 if filternames:
2391 possiblefilters &= set(filternames)
2396 possiblefilters &= set(filternames)
2392 subsettable = getbranchmapsubsettable()
2397 subsettable = getbranchmapsubsettable()
2393 allfilters = []
2398 allfilters = []
2394 while possiblefilters:
2399 while possiblefilters:
2395 for name in possiblefilters:
2400 for name in possiblefilters:
2396 subset = subsettable.get(name)
2401 subset = subsettable.get(name)
2397 if subset not in possiblefilters:
2402 if subset not in possiblefilters:
2398 break
2403 break
2399 else:
2404 else:
2400 assert False, b'subset cycle %s!' % possiblefilters
2405 assert False, b'subset cycle %s!' % possiblefilters
2401 allfilters.append(name)
2406 allfilters.append(name)
2402 possiblefilters.remove(name)
2407 possiblefilters.remove(name)
2403
2408
2404 # warm the cache
2409 # warm the cache
2405 if not full:
2410 if not full:
2406 for name in allfilters:
2411 for name in allfilters:
2407 repo.filtered(name).branchmap()
2412 repo.filtered(name).branchmap()
2408 if not filternames or b'unfiltered' in filternames:
2413 if not filternames or b'unfiltered' in filternames:
2409 # add unfiltered
2414 # add unfiltered
2410 allfilters.append(None)
2415 allfilters.append(None)
2411
2416
2412 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2417 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2413 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2418 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2414 branchcacheread.set(classmethod(lambda *args: None))
2419 branchcacheread.set(classmethod(lambda *args: None))
2415 else:
2420 else:
2416 # older versions
2421 # older versions
2417 branchcacheread = safeattrsetter(branchmap, b'read')
2422 branchcacheread = safeattrsetter(branchmap, b'read')
2418 branchcacheread.set(lambda *args: None)
2423 branchcacheread.set(lambda *args: None)
2419 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2424 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2420 branchcachewrite.set(lambda *args: None)
2425 branchcachewrite.set(lambda *args: None)
2421 try:
2426 try:
2422 for name in allfilters:
2427 for name in allfilters:
2423 printname = name
2428 printname = name
2424 if name is None:
2429 if name is None:
2425 printname = b'unfiltered'
2430 printname = b'unfiltered'
2426 timer(getbranchmap(name), title=str(printname))
2431 timer(getbranchmap(name), title=str(printname))
2427 finally:
2432 finally:
2428 branchcacheread.restore()
2433 branchcacheread.restore()
2429 branchcachewrite.restore()
2434 branchcachewrite.restore()
2430 fm.end()
2435 fm.end()
2431
2436
2432 @command(b'perfbranchmapupdate', [
2437 @command(b'perfbranchmapupdate', [
2433 (b'', b'base', [], b'subset of revisions to start from'),
2438 (b'', b'base', [], b'subset of revisions to start from'),
2434 (b'', b'target', [], b'subset of revisions to end with'),
2439 (b'', b'target', [], b'subset of revisions to end with'),
2435 (b'', b'clear-caches', False, b'clear caches between each run')
2440 (b'', b'clear-caches', False, b'clear caches between each run')
2436 ] + formatteropts)
2441 ] + formatteropts)
2437 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2442 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2438 """benchmark branchmap update from for <base> revs to <target> revs
2443 """benchmark branchmap update from for <base> revs to <target> revs
2439
2444
2440 If `--clear-caches` is passed, the following items will be reset before
2445 If `--clear-caches` is passed, the following items will be reset before
2441 each update:
2446 each update:
2442 * the changelog instance and associated indexes
2447 * the changelog instance and associated indexes
2443 * the rev-branch-cache instance
2448 * the rev-branch-cache instance
2444
2449
2445 Examples:
2450 Examples:
2446
2451
2447 # update for the one last revision
2452 # update for the one last revision
2448 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2453 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2449
2454
2450 # update for a change coming with a new branch
2455 # update for a change coming with a new branch
2451 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2456 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2452 """
2457 """
2453 from mercurial import branchmap
2458 from mercurial import branchmap
2454 from mercurial import repoview
2459 from mercurial import repoview
2455 opts = _byteskwargs(opts)
2460 opts = _byteskwargs(opts)
2456 timer, fm = gettimer(ui, opts)
2461 timer, fm = gettimer(ui, opts)
2457 clearcaches = opts[b'clear_caches']
2462 clearcaches = opts[b'clear_caches']
2458 unfi = repo.unfiltered()
2463 unfi = repo.unfiltered()
2459 x = [None] # used to pass data between closures
2464 x = [None] # used to pass data between closures
2460
2465
2461 # we use a `list` here to avoid possible side effects from smartset
2466 # we use a `list` here to avoid possible side effects from smartset
2462 baserevs = list(scmutil.revrange(repo, base))
2467 baserevs = list(scmutil.revrange(repo, base))
2463 targetrevs = list(scmutil.revrange(repo, target))
2468 targetrevs = list(scmutil.revrange(repo, target))
2464 if not baserevs:
2469 if not baserevs:
2465 raise error.Abort(b'no revisions selected for --base')
2470 raise error.Abort(b'no revisions selected for --base')
2466 if not targetrevs:
2471 if not targetrevs:
2467 raise error.Abort(b'no revisions selected for --target')
2472 raise error.Abort(b'no revisions selected for --target')
2468
2473
2469 # make sure the target branchmap also contains the one in the base
2474 # make sure the target branchmap also contains the one in the base
2470 targetrevs = list(set(baserevs) | set(targetrevs))
2475 targetrevs = list(set(baserevs) | set(targetrevs))
2471 targetrevs.sort()
2476 targetrevs.sort()
2472
2477
2473 cl = repo.changelog
2478 cl = repo.changelog
2474 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2479 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2475 allbaserevs.sort()
2480 allbaserevs.sort()
2476 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2481 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2477
2482
2478 newrevs = list(alltargetrevs.difference(allbaserevs))
2483 newrevs = list(alltargetrevs.difference(allbaserevs))
2479 newrevs.sort()
2484 newrevs.sort()
2480
2485
2481 allrevs = frozenset(unfi.changelog.revs())
2486 allrevs = frozenset(unfi.changelog.revs())
2482 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2487 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2483 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2488 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2484
2489
2485 def basefilter(repo, visibilityexceptions=None):
2490 def basefilter(repo, visibilityexceptions=None):
2486 return basefilterrevs
2491 return basefilterrevs
2487
2492
2488 def targetfilter(repo, visibilityexceptions=None):
2493 def targetfilter(repo, visibilityexceptions=None):
2489 return targetfilterrevs
2494 return targetfilterrevs
2490
2495
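# the two functions above become throwaway repoview filters (registered in
# the try block below), giving us repo views whose visible revisions are
# exactly the base and target sets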
2491 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2496 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2492 ui.status(msg % (len(allbaserevs), len(newrevs)))
2497 ui.status(msg % (len(allbaserevs), len(newrevs)))
2493 if targetfilterrevs:
2498 if targetfilterrevs:
2494 msg = b'(%d revisions still filtered)\n'
2499 msg = b'(%d revisions still filtered)\n'
2495 ui.status(msg % len(targetfilterrevs))
2500 ui.status(msg % len(targetfilterrevs))
2496
2501
2497 try:
2502 try:
2498 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2503 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2499 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2504 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2500
2505
2501 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2506 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2502 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2507 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2503
2508
2504 # try to find an existing branchmap to reuse
2509 # try to find an existing branchmap to reuse
2505 subsettable = getbranchmapsubsettable()
2510 subsettable = getbranchmapsubsettable()
2506 candidatefilter = subsettable.get(None)
2511 candidatefilter = subsettable.get(None)
2507 while candidatefilter is not None:
2512 while candidatefilter is not None:
2508 candidatebm = repo.filtered(candidatefilter).branchmap()
2513 candidatebm = repo.filtered(candidatefilter).branchmap()
2509 if candidatebm.validfor(baserepo):
2514 if candidatebm.validfor(baserepo):
2510 filtered = repoview.filterrevs(repo, candidatefilter)
2515 filtered = repoview.filterrevs(repo, candidatefilter)
2511 missing = [r for r in allbaserevs if r in filtered]
2516 missing = [r for r in allbaserevs if r in filtered]
2512 base = candidatebm.copy()
2517 base = candidatebm.copy()
2513 base.update(baserepo, missing)
2518 base.update(baserepo, missing)
2514 break
2519 break
2515 candidatefilter = subsettable.get(candidatefilter)
2520 candidatefilter = subsettable.get(candidatefilter)
2516 else:
2521 else:
2517 # no suitable subset was found
2522 # no suitable subset was found
2518 base = branchmap.branchcache()
2523 base = branchmap.branchcache()
2519 base.update(baserepo, allbaserevs)
2524 base.update(baserepo, allbaserevs)
2520
2525
2521 def setup():
2526 def setup():
2522 x[0] = base.copy()
2527 x[0] = base.copy()
2523 if clearcaches:
2528 if clearcaches:
2524 unfi._revbranchcache = None
2529 unfi._revbranchcache = None
2525 clearchangelog(repo)
2530 clearchangelog(repo)
2526
2531
2527 def bench():
2532 def bench():
2528 x[0].update(targetrepo, newrevs)
2533 x[0].update(targetrepo, newrevs)
2529
2534
2530 timer(bench, setup=setup)
2535 timer(bench, setup=setup)
2531 fm.end()
2536 fm.end()
2532 finally:
2537 finally:
2533 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2538 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2534 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2539 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2535
2540
2536 @command(b'perfbranchmapload', [
2541 @command(b'perfbranchmapload', [
2537 (b'f', b'filter', b'', b'Specify repoview filter'),
2542 (b'f', b'filter', b'', b'Specify repoview filter'),
2538 (b'', b'list', False, b'List branchmap filter caches'),
2543 (b'', b'list', False, b'List branchmap filter caches'),
2539 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2544 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2540
2545
2541 ] + formatteropts)
2546 ] + formatteropts)
2542 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2547 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2543 """benchmark reading the branchmap"""
2548 """benchmark reading the branchmap"""
2544 opts = _byteskwargs(opts)
2549 opts = _byteskwargs(opts)
2545 clearrevlogs = opts[b'clear_revlogs']
2550 clearrevlogs = opts[b'clear_revlogs']
2546
2551
2547 if list:
2552 if list:
2548 for name, kind, st in repo.cachevfs.readdir(stat=True):
2553 for name, kind, st in repo.cachevfs.readdir(stat=True):
2549 if name.startswith(b'branch2'):
2554 if name.startswith(b'branch2'):
2550 filtername = name.partition(b'-')[2] or b'unfiltered'
2555 filtername = name.partition(b'-')[2] or b'unfiltered'
2551 ui.status(b'%s - %s\n'
2556 ui.status(b'%s - %s\n'
2552 % (filtername, util.bytecount(st.st_size)))
2557 % (filtername, util.bytecount(st.st_size)))
2553 return
2558 return
2554 if not filter:
2559 if not filter:
2555 filter = None
2560 filter = None
2556 subsettable = getbranchmapsubsettable()
2561 subsettable = getbranchmapsubsettable()
2557 if filter is None:
2562 if filter is None:
2558 repo = repo.unfiltered()
2563 repo = repo.unfiltered()
2559 else:
2564 else:
2560 repo = repoview.repoview(repo, filter)
2565 repo = repoview.repoview(repo, filter)
2561
2566
2562 repo.branchmap() # make sure we have a relevant, up-to-date branchmap
2567 repo.branchmap() # make sure we have a relevant, up-to-date branchmap
2563
2568
2564 try:
2569 try:
2565 fromfile = branchmap.branchcache.fromfile
2570 fromfile = branchmap.branchcache.fromfile
2566 except AttributeError:
2571 except AttributeError:
2567 # older versions
2572 # older versions
2568 fromfile = branchmap.read
2573 fromfile = branchmap.read
2569
2574
2570 currentfilter = filter
2575 currentfilter = filter
2571 # try once without timer, the filter may not be cached
2576 # try once without timer, the filter may not be cached
2572 while fromfile(repo) is None:
2577 while fromfile(repo) is None:
2573 currentfilter = subsettable.get(currentfilter)
2578 currentfilter = subsettable.get(currentfilter)
2574 if currentfilter is None:
2579 if currentfilter is None:
2575 raise error.Abort(b'No branchmap cached for %s repo'
2580 raise error.Abort(b'No branchmap cached for %s repo'
2576 % (filter or b'unfiltered'))
2581 % (filter or b'unfiltered'))
2577 repo = repo.filtered(currentfilter)
2582 repo = repo.filtered(currentfilter)
2578 timer, fm = gettimer(ui, opts)
2583 timer, fm = gettimer(ui, opts)
2579 def setup():
2584 def setup():
2580 if clearrevlogs:
2585 if clearrevlogs:
2581 clearchangelog(repo)
2586 clearchangelog(repo)
2582 def bench():
2587 def bench():
2583 fromfile(repo)
2588 fromfile(repo)
2584 timer(bench, setup=setup)
2589 timer(bench, setup=setup)
2585 fm.end()
2590 fm.end()
2586
2591
2587 @command(b'perfloadmarkers')
2592 @command(b'perfloadmarkers')
2588 def perfloadmarkers(ui, repo):
2593 def perfloadmarkers(ui, repo):
2589 """benchmark the time to parse the on-disk markers for a repo
2594 """benchmark the time to parse the on-disk markers for a repo
2590
2595
2591 Result is the number of markers in the repo."""
2596 Result is the number of markers in the repo."""
2592 timer, fm = gettimer(ui)
2597 timer, fm = gettimer(ui)
2593 svfs = getsvfs(repo)
2598 svfs = getsvfs(repo)
2594 timer(lambda: len(obsolete.obsstore(svfs)))
2599 timer(lambda: len(obsolete.obsstore(svfs)))
2595 fm.end()
2600 fm.end()
2596
2601
2597 @command(b'perflrucachedict', formatteropts +
2602 @command(b'perflrucachedict', formatteropts +
2598 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2603 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2599 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2604 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2600 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2605 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2601 (b'', b'size', 4, b'size of cache'),
2606 (b'', b'size', 4, b'size of cache'),
2602 (b'', b'gets', 10000, b'number of key lookups'),
2607 (b'', b'gets', 10000, b'number of key lookups'),
2603 (b'', b'sets', 10000, b'number of key sets'),
2608 (b'', b'sets', 10000, b'number of key sets'),
2604 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2609 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2605 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2610 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2606 norepo=True)
2611 norepo=True)
2607 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2612 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2608 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2613 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2609 opts = _byteskwargs(opts)
2614 opts = _byteskwargs(opts)
2610
2615
2611 def doinit():
2616 def doinit():
2612 for i in _xrange(10000):
2617 for i in _xrange(10000):
2613 util.lrucachedict(size)
2618 util.lrucachedict(size)
2614
2619
2615 costrange = list(range(mincost, maxcost + 1))
2620 costrange = list(range(mincost, maxcost + 1))
2616
2621
2617 values = []
2622 values = []
2618 for i in _xrange(size):
2623 for i in _xrange(size):
2619 values.append(random.randint(0, _maxint))
2624 values.append(random.randint(0, _maxint))
2620
2625
2621 # Get mode fills the cache and tests raw lookup performance with no
2626 # Get mode fills the cache and tests raw lookup performance with no
2622 # eviction.
2627 # eviction.
2623 getseq = []
2628 getseq = []
2624 for i in _xrange(gets):
2629 for i in _xrange(gets):
2625 getseq.append(random.choice(values))
2630 getseq.append(random.choice(values))
2626
2631
2627 def dogets():
2632 def dogets():
2628 d = util.lrucachedict(size)
2633 d = util.lrucachedict(size)
2629 for v in values:
2634 for v in values:
2630 d[v] = v
2635 d[v] = v
2631 for key in getseq:
2636 for key in getseq:
2632 value = d[key]
2637 value = d[key]
2633 value # silence pyflakes warning
2638 value # silence pyflakes warning
2634
2639
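# like dogets(), but exercises the cost-aware insert() API; `costs` is
# populated below before any timed run executes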
2635 def dogetscost():
2640 def dogetscost():
2636 d = util.lrucachedict(size, maxcost=costlimit)
2641 d = util.lrucachedict(size, maxcost=costlimit)
2637 for i, v in enumerate(values):
2642 for i, v in enumerate(values):
2638 d.insert(v, v, cost=costs[i])
2643 d.insert(v, v, cost=costs[i])
2639 for key in getseq:
2644 for key in getseq:
2640 try:
2645 try:
2641 value = d[key]
2646 value = d[key]
2642 value # silence pyflakes warning
2647 value # silence pyflakes warning
2643 except KeyError:
2648 except KeyError:
2644 pass
2649 pass
2645
2650
2646 # Set mode tests insertion speed with cache eviction.
2651 # Set mode tests insertion speed with cache eviction.
2647 setseq = []
2652 setseq = []
2648 costs = []
2653 costs = []
2649 for i in _xrange(sets):
2654 for i in _xrange(sets):
2650 setseq.append(random.randint(0, _maxint))
2655 setseq.append(random.randint(0, _maxint))
2651 costs.append(random.choice(costrange))
2656 costs.append(random.choice(costrange))
2652
2657
2653 def doinserts():
2658 def doinserts():
2654 d = util.lrucachedict(size)
2659 d = util.lrucachedict(size)
2655 for v in setseq:
2660 for v in setseq:
2656 d.insert(v, v)
2661 d.insert(v, v)
2657
2662
2658 def doinsertscost():
2663 def doinsertscost():
2659 d = util.lrucachedict(size, maxcost=costlimit)
2664 d = util.lrucachedict(size, maxcost=costlimit)
2660 for i, v in enumerate(setseq):
2665 for i, v in enumerate(setseq):
2661 d.insert(v, v, cost=costs[i])
2666 d.insert(v, v, cost=costs[i])
2662
2667
2663 def dosets():
2668 def dosets():
2664 d = util.lrucachedict(size)
2669 d = util.lrucachedict(size)
2665 for v in setseq:
2670 for v in setseq:
2666 d[v] = v
2671 d[v] = v
2667
2672
2668 # Mixed mode randomly performs gets and sets with eviction.
2673 # Mixed mode randomly performs gets and sets with eviction.
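# each entry is (op, key, cost): op 0 performs a get, op 1 a set/insert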
2669 mixedops = []
2674 mixedops = []
2670 for i in _xrange(mixed):
2675 for i in _xrange(mixed):
2671 r = random.randint(0, 100)
2676 r = random.randint(0, 100)
2672 if r < mixedgetfreq:
2677 if r < mixedgetfreq:
2673 op = 0
2678 op = 0
2674 else:
2679 else:
2675 op = 1
2680 op = 1
2676
2681
2677 mixedops.append((op,
2682 mixedops.append((op,
2678 random.randint(0, size * 2),
2683 random.randint(0, size * 2),
2679 random.choice(costrange)))
2684 random.choice(costrange)))
2680
2685
2681 def domixed():
2686 def domixed():
2682 d = util.lrucachedict(size)
2687 d = util.lrucachedict(size)
2683
2688
2684 for op, v, cost in mixedops:
2689 for op, v, cost in mixedops:
2685 if op == 0:
2690 if op == 0:
2686 try:
2691 try:
2687 d[v]
2692 d[v]
2688 except KeyError:
2693 except KeyError:
2689 pass
2694 pass
2690 else:
2695 else:
2691 d[v] = v
2696 d[v] = v
2692
2697
2693 def domixedcost():
2698 def domixedcost():
2694 d = util.lrucachedict(size, maxcost=costlimit)
2699 d = util.lrucachedict(size, maxcost=costlimit)
2695
2700
2696 for op, v, cost in mixedops:
2701 for op, v, cost in mixedops:
2697 if op == 0:
2702 if op == 0:
2698 try:
2703 try:
2699 d[v]
2704 d[v]
2700 except KeyError:
2705 except KeyError:
2701 pass
2706 pass
2702 else:
2707 else:
2703 d.insert(v, v, cost=cost)
2708 d.insert(v, v, cost=cost)
2704
2709
2705 benches = [
2710 benches = [
2706 (doinit, b'init'),
2711 (doinit, b'init'),
2707 ]
2712 ]
2708
2713
2709 if costlimit:
2714 if costlimit:
2710 benches.extend([
2715 benches.extend([
2711 (dogetscost, b'gets w/ cost limit'),
2716 (dogetscost, b'gets w/ cost limit'),
2712 (doinsertscost, b'inserts w/ cost limit'),
2717 (doinsertscost, b'inserts w/ cost limit'),
2713 (domixedcost, b'mixed w/ cost limit'),
2718 (domixedcost, b'mixed w/ cost limit'),
2714 ])
2719 ])
2715 else:
2720 else:
2716 benches.extend([
2721 benches.extend([
2717 (dogets, b'gets'),
2722 (dogets, b'gets'),
2718 (doinserts, b'inserts'),
2723 (doinserts, b'inserts'),
2719 (dosets, b'sets'),
2724 (dosets, b'sets'),
2720 (domixed, b'mixed')
2725 (domixed, b'mixed')
2721 ])
2726 ])
2722
2727
2723 for fn, title in benches:
2728 for fn, title in benches:
2724 timer, fm = gettimer(ui, opts)
2729 timer, fm = gettimer(ui, opts)
2725 timer(fn, title=title)
2730 timer(fn, title=title)
2726 fm.end()
2731 fm.end()
2727
2732
2728 @command(b'perfwrite', formatteropts)
2733 @command(b'perfwrite', formatteropts)
2729 def perfwrite(ui, repo, **opts):
2734 def perfwrite(ui, repo, **opts):
2730 """microbenchmark ui.write
2735 """microbenchmark ui.write
2731 """
2736 """
2732 opts = _byteskwargs(opts)
2737 opts = _byteskwargs(opts)
2733
2738
2734 timer, fm = gettimer(ui, opts)
2739 timer, fm = gettimer(ui, opts)
2735 def write():
2740 def write():
2736 for i in range(100000):
2741 for i in range(100000):
2737 ui.write((b'Testing write performance\n'))
2742 ui.write((b'Testing write performance\n'))
2738 timer(write)
2743 timer(write)
2739 fm.end()
2744 fm.end()
2740
2745
2741 def uisetup(ui):
2746 def uisetup(ui):
2742 if (util.safehasattr(cmdutil, b'openrevlog') and
2747 if (util.safehasattr(cmdutil, b'openrevlog') and
2743 not util.safehasattr(commands, b'debugrevlogopts')):
2748 not util.safehasattr(commands, b'debugrevlogopts')):
2744 # for "historical portability":
2749 # for "historical portability":
2745 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2750 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
2746 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2751 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
2747 # openrevlog() should cause failure, because it has been
2752 # openrevlog() should cause failure, because it has been
2748 # available since 3.5 (or 49c583ca48c4).
2753 # available since 3.5 (or 49c583ca48c4).
2749 def openrevlog(orig, repo, cmd, file_, opts):
2754 def openrevlog(orig, repo, cmd, file_, opts):
2750 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2755 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
2751 raise error.Abort(b"This version doesn't support --dir option",
2756 raise error.Abort(b"This version doesn't support --dir option",
2752 hint=b"use 3.5 or later")
2757 hint=b"use 3.5 or later")
2753 return orig(repo, cmd, file_, opts)
2758 return orig(repo, cmd, file_, opts)
2754 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2759 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
2755
2760
2756 @command(b'perfprogress', formatteropts + [
2761 @command(b'perfprogress', formatteropts + [
2757 (b'', b'topic', b'topic', b'topic for progress messages'),
2762 (b'', b'topic', b'topic', b'topic for progress messages'),
2758 (b'c', b'total', 1000000, b'total value we are progressing to'),
2763 (b'c', b'total', 1000000, b'total value we are progressing to'),
2759 ], norepo=True)
2764 ], norepo=True)
2760 def perfprogress(ui, topic=None, total=None, **opts):
2765 def perfprogress(ui, topic=None, total=None, **opts):
2761 """printing of progress bars"""
2766 """printing of progress bars"""
2762 opts = _byteskwargs(opts)
2767 opts = _byteskwargs(opts)
2763
2768
2764 timer, fm = gettimer(ui, opts)
2769 timer, fm = gettimer(ui, opts)
2765
2770
2766 def doprogress():
2771 def doprogress():
2767 with ui.makeprogress(topic, total=total) as progress:
2772 with ui.makeprogress(topic, total=total) as progress:
2768 for i in pycompat.xrange(total):
2773 for i in pycompat.xrange(total):
2769 progress.increment()
2774 progress.increment()
2770
2775
2771 timer(doprogress)
2776 timer(doprogress)
2772 fm.end()
2777 fm.end()
@@ -1,581 +1,595 b''
1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11
11
12 from .node import (
12 from .node import (
13 bin,
13 bin,
14 hex,
14 hex,
15 nullid,
15 nullid,
16 nullrev,
16 nullrev,
17 )
17 )
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 pycompat,
21 pycompat,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 )
24 )
25 from .utils import (
25 from .utils import (
26 stringutil,
26 stringutil,
27 )
27 )
28
28
29 calcsize = struct.calcsize
29 calcsize = struct.calcsize
30 pack_into = struct.pack_into
30 pack_into = struct.pack_into
31 unpack_from = struct.unpack_from
31 unpack_from = struct.unpack_from
32
32
33
33
34 ### Nearest subset relation
34 ### Nearest subset relation
35 # Nearest subset of filter X is a filter Y so that:
35 # Nearest subset of filter X is a filter Y so that:
36 # * Y is included in X,
36 # * Y is included in X,
37 # * X - Y is as small as possible.
37 # * X - Y is as small as possible.
38 # This creates an ordering used for branchmap purposes.
38 # This creates an ordering used for branchmap purposes.
39 # The ordering may be partial.
39 # The ordering may be partial.
40 subsettable = {None: 'visible',
40 subsettable = {None: 'visible',
41 'visible-hidden': 'visible',
41 'visible-hidden': 'visible',
42 'visible': 'served',
42 'visible': 'served',
43 'served': 'immutable',
43 'served': 'immutable',
44 'immutable': 'base'}
44 'immutable': 'base'}
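A minimal sketch (not part of this patch) of how the nearest-subset table above chains filter names down to 'base'; fallbackchain is a made-up helper name:

    def fallbackchain(filtername, table):
        # Follow the nearest-subset table until a filter has no subset.
        chain = []
        while filtername in table:
            filtername = table[filtername]
            chain.append(filtername)
        return chain

    # fallbackchain('visible', subsettable) -> ['served', 'immutable', 'base']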
45
45
46 def updatecache(repo):
46
47 """Update the cache for the given filtered view on a repository"""
47 class BranchMapCache(object):
48 # This can trigger updates for the caches for subsets of the filtered
48 """Cache mapping"""
49 # view, e.g. when there is no cache for this filtered view or the cache
49 def __init__(self):
50 # is stale.
50 self._per_filter = {}
51
51
52 cl = repo.changelog
52 def __getitem__(self, repo):
53 filtername = repo.filtername
53 self.updatecache(repo)
54 bcache = repo._branchcaches.get(filtername)
54 return self._per_filter[repo.filtername]
55 if bcache is None or not bcache.validfor(repo):
55
56 # cache object missing or cache object stale? Read from disk
56 def updatecache(self, repo):
57 bcache = branchcache.fromfile(repo)
57 """Update the cache for the given filtered view on a repository"""
58 # This can trigger updates for the caches for subsets of the filtered
59 # view, e.g. when there is no cache for this filtered view or the cache
60 # is stale.
58
61
59 revs = []
62 cl = repo.changelog
60 if bcache is None:
63 filtername = repo.filtername
61 # no (fresh) cache available anymore, perhaps we can re-use
64 bcache = self._per_filter.get(filtername)
62 # the cache for a subset, then extend that to add info on missing
65 if bcache is None or not bcache.validfor(repo):
63 # revisions.
66 # cache object missing or cache object stale? Read from disk
64 subsetname = subsettable.get(filtername)
67 bcache = branchcache.fromfile(repo)
65 if subsetname is not None:
66 subset = repo.filtered(subsetname)
67 bcache = subset.branchmap().copy()
68 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
69 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
70 else:
71 # nothing to fall back on, start empty.
72 bcache = branchcache()
73
68
74 revs.extend(cl.revs(start=bcache.tiprev + 1))
69 revs = []
75 if revs:
70 if bcache is None:
76 bcache.update(repo, revs)
71 # no (fresh) cache available anymore, perhaps we can re-use
72 # the cache for a subset, then extend that to add info on missing
73 # revisions.
74 subsetname = subsettable.get(filtername)
75 if subsetname is not None:
76 subset = repo.filtered(subsetname)
77 bcache = self[subset].copy()
78 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
79 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
80 else:
81 # nothing to fall back on, start empty.
82 bcache = branchcache()
77
83
78 assert bcache.validfor(repo), filtername
84 revs.extend(cl.revs(start=bcache.tiprev + 1))
79 repo._branchcaches[repo.filtername] = bcache
85 if revs:
86 bcache.update(repo, revs)
80
87
81 def replacecache(repo, bm):
88 assert bcache.validfor(repo), filtername
82 """Replace the branchmap cache for a repo with a branch mapping.
89 self._per_filter[repo.filtername] = bcache
90
91 def replace(self, repo, remotebranchmap):
92 """Replace the branchmap cache for a repo with a branch mapping.
93
94 This is likely only called during clone with a branch map from a
95 remote.
83
96
84 This is likely only called during clone with a branch map from a remote.
97 """
85 """
98 cl = repo.changelog
86 cl = repo.changelog
99 clrev = cl.rev
87 clrev = cl.rev
100 clbranchinfo = cl.branchinfo
88 clbranchinfo = cl.branchinfo
101 rbheads = []
89 rbheads = []
102 closed = []
90 closed = []
103 for bheads in remotebranchmap.itervalues():
91 for bheads in bm.itervalues():
104 rbheads += bheads
92 rbheads.extend(bheads)
105 for h in bheads:
93 for h in bheads:
106 r = clrev(h)
94 r = clrev(h)
107 b, c = clbranchinfo(r)
95 b, c = clbranchinfo(r)
108 if c:
96 if c:
109 closed.append(h)
97 closed.append(h)
98
110
99 if rbheads:
111 if rbheads:
100 rtiprev = max((int(clrev(node))
112 rtiprev = max((int(clrev(node)) for node in rbheads))
101 for node in rbheads))
113 cache = branchcache(
102 cache = branchcache(bm,
114 remotebranchmap, repo[rtiprev].node(), rtiprev,
103 repo[rtiprev].node(),
115 closednodes=closed)
104 rtiprev,
105 closednodes=closed)
106
116
107 # Try to stick it as low as possible
117 # Try to stick it as low as possible
108 # filters above served are unlikely to be fetched from a clone
118 # filters above served are unlikely to be fetched from a clone
109 for candidate in ('base', 'immutable', 'served'):
119 for candidate in ('base', 'immutable', 'served'):
110 rview = repo.filtered(candidate)
120 rview = repo.filtered(candidate)
111 if cache.validfor(rview):
121 if cache.validfor(rview):
112 repo._branchcaches[candidate] = cache
122 self._per_filter[candidate] = cache
113 cache.write(rview)
123 cache.write(rview)
114 break
124 return
125
126 def clear(self):
127 self._per_filter.clear()
128
115
129
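A hypothetical usage sketch of the new class ('repo' is assumed to be an existing local repository object; indexing by a filtered view refreshes that view's cache on demand):

    caches = BranchMapCache()
    served = repo.filtered('served')
    bmap = caches[served]                 # runs updatecache(served) first
    heads = bmap.branchheads('default')   # query the refreshed branchcache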
116 class branchcache(dict):
130 class branchcache(dict):
117 """A dict like object that hold branches heads cache.
131 """A dict like object that hold branches heads cache.
118
132
119 This cache is used to avoid costly computations to determine all the
133 This cache is used to avoid costly computations to determine all the
120 branch heads of a repo.
134 branch heads of a repo.
121
135
122 The cache is serialized on disk in the following format:
136 The cache is serialized on disk in the following format:
123
137
124 <tip hex node> <tip rev number> [optional filtered repo hex hash]
138 <tip hex node> <tip rev number> [optional filtered repo hex hash]
125 <branch head hex node> <open/closed state> <branch name>
139 <branch head hex node> <open/closed state> <branch name>
126 <branch head hex node> <open/closed state> <branch name>
140 <branch head hex node> <open/closed state> <branch name>
127 ...
141 ...
128
142
129 The first line is used to check if the cache is still valid. If the
143 The first line is used to check if the cache is still valid. If the
130 branch cache is for a filtered repo view, an optional third hash is
144 branch cache is for a filtered repo view, an optional third hash is
131 included that hashes the hashes of all filtered revisions.
145 included that hashes the hashes of all filtered revisions.
132
146
133 The open/closed state is represented by a single letter 'o' or 'c'.
147 The open/closed state is represented by a single letter 'o' or 'c'.
134 This field can be used to avoid changelog reads when determining if a
148 This field can be used to avoid changelog reads when determining if a
135 branch head closes a branch or not.
149 branch head closes a branch or not.
136 """
150 """
137 @classmethod
151 @classmethod
138 def fromfile(cls, repo):
152 def fromfile(cls, repo):
139 f = None
153 f = None
140 try:
154 try:
141 f = repo.cachevfs(cls._filename(repo))
155 f = repo.cachevfs(cls._filename(repo))
142 lineiter = iter(f)
156 lineiter = iter(f)
143 cachekey = next(lineiter).rstrip('\n').split(" ", 2)
157 cachekey = next(lineiter).rstrip('\n').split(" ", 2)
144 last, lrev = cachekey[:2]
158 last, lrev = cachekey[:2]
145 last, lrev = bin(last), int(lrev)
159 last, lrev = bin(last), int(lrev)
146 filteredhash = None
160 filteredhash = None
147 if len(cachekey) > 2:
161 if len(cachekey) > 2:
148 filteredhash = bin(cachekey[2])
162 filteredhash = bin(cachekey[2])
149 bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash)
163 bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash)
150 if not bcache.validfor(repo):
164 if not bcache.validfor(repo):
151 # invalidate the cache
165 # invalidate the cache
152 raise ValueError(r'tip differs')
166 raise ValueError(r'tip differs')
153 cl = repo.changelog
167 cl = repo.changelog
154 for line in lineiter:
168 for line in lineiter:
155 line = line.rstrip('\n')
169 line = line.rstrip('\n')
156 if not line:
170 if not line:
157 continue
171 continue
158 node, state, label = line.split(" ", 2)
172 node, state, label = line.split(" ", 2)
159 if state not in 'oc':
173 if state not in 'oc':
160 raise ValueError(r'invalid branch state')
174 raise ValueError(r'invalid branch state')
161 label = encoding.tolocal(label.strip())
175 label = encoding.tolocal(label.strip())
162 node = bin(node)
176 node = bin(node)
163 if not cl.hasnode(node):
177 if not cl.hasnode(node):
164 raise ValueError(
178 raise ValueError(
165 r'node %s does not exist' % pycompat.sysstr(hex(node)))
179 r'node %s does not exist' % pycompat.sysstr(hex(node)))
166 bcache.setdefault(label, []).append(node)
180 bcache.setdefault(label, []).append(node)
167 if state == 'c':
181 if state == 'c':
168 bcache._closednodes.add(node)
182 bcache._closednodes.add(node)
169
183
170 except (IOError, OSError):
184 except (IOError, OSError):
171 return None
185 return None
172
186
173 except Exception as inst:
187 except Exception as inst:
174 if repo.ui.debugflag:
188 if repo.ui.debugflag:
175 msg = 'invalid branchheads cache'
189 msg = 'invalid branchheads cache'
176 if repo.filtername is not None:
190 if repo.filtername is not None:
177 msg += ' (%s)' % repo.filtername
191 msg += ' (%s)' % repo.filtername
178 msg += ': %s\n'
192 msg += ': %s\n'
179 repo.ui.debug(msg % pycompat.bytestr(inst))
193 repo.ui.debug(msg % pycompat.bytestr(inst))
180 bcache = None
194 bcache = None
181
195
182 finally:
196 finally:
183 if f:
197 if f:
184 f.close()
198 f.close()
185
199
186 return bcache
200 return bcache
187
201
188 @staticmethod
202 @staticmethod
189 def _filename(repo):
203 def _filename(repo):
190 """name of a branchcache file for a given repo or repoview"""
204 """name of a branchcache file for a given repo or repoview"""
191 filename = "branch2"
205 filename = "branch2"
192 if repo.filtername:
206 if repo.filtername:
193 filename = '%s-%s' % (filename, repo.filtername)
207 filename = '%s-%s' % (filename, repo.filtername)
194 return filename
208 return filename
195
209
196 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
210 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
197 filteredhash=None, closednodes=None):
211 filteredhash=None, closednodes=None):
198 super(branchcache, self).__init__(entries)
212 super(branchcache, self).__init__(entries)
199 self.tipnode = tipnode
213 self.tipnode = tipnode
200 self.tiprev = tiprev
214 self.tiprev = tiprev
201 self.filteredhash = filteredhash
215 self.filteredhash = filteredhash
202 # closednodes is a set of nodes that close their branch. If the branch
216 # closednodes is a set of nodes that close their branch. If the branch
203 # cache has been updated, it may contain nodes that are no longer
217 # cache has been updated, it may contain nodes that are no longer
204 # heads.
218 # heads.
205 if closednodes is None:
219 if closednodes is None:
206 self._closednodes = set()
220 self._closednodes = set()
207 else:
221 else:
208 self._closednodes = closednodes
222 self._closednodes = closednodes
209
223
210 def validfor(self, repo):
224 def validfor(self, repo):
211 """Is the cache content valid regarding a repo
225 """Is the cache content valid regarding a repo
212
226
213 - False when cached tipnode is unknown or if we detect a strip.
227 - False when cached tipnode is unknown or if we detect a strip.
214 - True when cache is up to date or a subset of current repo."""
228 - True when cache is up to date or a subset of current repo."""
215 try:
229 try:
216 return ((self.tipnode == repo.changelog.node(self.tiprev))
230 return ((self.tipnode == repo.changelog.node(self.tiprev))
217 and (self.filteredhash == \
231 and (self.filteredhash == \
218 scmutil.filteredhash(repo, self.tiprev)))
232 scmutil.filteredhash(repo, self.tiprev)))
219 except IndexError:
233 except IndexError:
220 return False
234 return False
221
235
222 def _branchtip(self, heads):
236 def _branchtip(self, heads):
223 '''Return a tuple (last open head in heads, False); if there is no
237 '''Return a tuple (last open head in heads, False); if there is no
224 open head, return (last closed head, True).'''
238 open head, return (last closed head, True).'''
225 tip = heads[-1]
239 tip = heads[-1]
226 closed = True
240 closed = True
227 for h in reversed(heads):
241 for h in reversed(heads):
228 if h not in self._closednodes:
242 if h not in self._closednodes:
229 tip = h
243 tip = h
230 closed = False
244 closed = False
231 break
245 break
232 return tip, closed
246 return tip, closed
233
247
234 def branchtip(self, branch):
248 def branchtip(self, branch):
235 '''Return the tipmost open head on branch, otherwise return the
249 '''Return the tipmost open head on branch, otherwise return the
236 tipmost closed head on branch.
250 tipmost closed head on branch.
237 Raise KeyError for unknown branch.'''
251 Raise KeyError for unknown branch.'''
238 return self._branchtip(self[branch])[0]
252 return self._branchtip(self[branch])[0]
239
253
240 def iteropen(self, nodes):
254 def iteropen(self, nodes):
241 return (n for n in nodes if n not in self._closednodes)
255 return (n for n in nodes if n not in self._closednodes)
242
256
243 def branchheads(self, branch, closed=False):
257 def branchheads(self, branch, closed=False):
244 heads = self[branch]
258 heads = self[branch]
245 if not closed:
259 if not closed:
246 heads = list(self.iteropen(heads))
260 heads = list(self.iteropen(heads))
247 return heads
261 return heads
248
262
249 def iterbranches(self):
263 def iterbranches(self):
250 for bn, heads in self.iteritems():
264 for bn, heads in self.iteritems():
251 yield (bn, heads) + self._branchtip(heads)
265 yield (bn, heads) + self._branchtip(heads)
252
266
253 def copy(self):
267 def copy(self):
254 """return an deep copy of the branchcache object"""
268 """return an deep copy of the branchcache object"""
255 return type(self)(
269 return type(self)(
256 self, self.tipnode, self.tiprev, self.filteredhash,
270 self, self.tipnode, self.tiprev, self.filteredhash,
257 self._closednodes)
271 self._closednodes)
258
272
259 def write(self, repo):
273 def write(self, repo):
260 try:
274 try:
261 f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
275 f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
262 cachekey = [hex(self.tipnode), '%d' % self.tiprev]
276 cachekey = [hex(self.tipnode), '%d' % self.tiprev]
263 if self.filteredhash is not None:
277 if self.filteredhash is not None:
264 cachekey.append(hex(self.filteredhash))
278 cachekey.append(hex(self.filteredhash))
265 f.write(" ".join(cachekey) + '\n')
279 f.write(" ".join(cachekey) + '\n')
266 nodecount = 0
280 nodecount = 0
267 for label, nodes in sorted(self.iteritems()):
281 for label, nodes in sorted(self.iteritems()):
268 for node in nodes:
282 for node in nodes:
269 nodecount += 1
283 nodecount += 1
270 if node in self._closednodes:
284 if node in self._closednodes:
271 state = 'c'
285 state = 'c'
272 else:
286 else:
273 state = 'o'
287 state = 'o'
274 f.write("%s %s %s\n" % (hex(node), state,
288 f.write("%s %s %s\n" % (hex(node), state,
275 encoding.fromlocal(label)))
289 encoding.fromlocal(label)))
276 f.close()
290 f.close()
277 repo.ui.log('branchcache',
291 repo.ui.log('branchcache',
278 'wrote %s branch cache with %d labels and %d nodes\n',
292 'wrote %s branch cache with %d labels and %d nodes\n',
279 repo.filtername, len(self), nodecount)
293 repo.filtername, len(self), nodecount)
280 except (IOError, OSError, error.Abort) as inst:
294 except (IOError, OSError, error.Abort) as inst:
281 # Abort may be raised by read only opener, so log and continue
295 # Abort may be raised by read only opener, so log and continue
282 repo.ui.debug("couldn't write branch cache: %s\n" %
296 repo.ui.debug("couldn't write branch cache: %s\n" %
283 stringutil.forcebytestr(inst))
297 stringutil.forcebytestr(inst))
284
298
285 def update(self, repo, revgen):
299 def update(self, repo, revgen):
286 """Given a branchhead cache, self, that may have extra nodes or be
300 """Given a branchhead cache, self, that may have extra nodes or be
287 missing heads, and a generator of nodes that are strictly a superset of
301 missing heads, and a generator of nodes that are strictly a superset of
288 the missing heads, this function updates self to be correct.
302 the missing heads, this function updates self to be correct.
289 """
303 """
290 starttime = util.timer()
304 starttime = util.timer()
291 cl = repo.changelog
305 cl = repo.changelog
292 # collect new branch entries
306 # collect new branch entries
293 newbranches = {}
307 newbranches = {}
294 getbranchinfo = repo.revbranchcache().branchinfo
308 getbranchinfo = repo.revbranchcache().branchinfo
295 for r in revgen:
309 for r in revgen:
296 branch, closesbranch = getbranchinfo(r)
310 branch, closesbranch = getbranchinfo(r)
297 newbranches.setdefault(branch, []).append(r)
311 newbranches.setdefault(branch, []).append(r)
298 if closesbranch:
312 if closesbranch:
299 self._closednodes.add(cl.node(r))
313 self._closednodes.add(cl.node(r))
300
314
301 # fetch current topological heads to speed up filtering
315 # fetch current topological heads to speed up filtering
302 topoheads = set(cl.headrevs())
316 topoheads = set(cl.headrevs())
303
317
304 # if older branchheads are reachable from new ones, they aren't
318 # if older branchheads are reachable from new ones, they aren't
305 # really branchheads. Note checking parents is insufficient:
319 # really branchheads. Note checking parents is insufficient:
306 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
320 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
307 for branch, newheadrevs in newbranches.iteritems():
321 for branch, newheadrevs in newbranches.iteritems():
308 bheads = self.setdefault(branch, [])
322 bheads = self.setdefault(branch, [])
309 bheadset = set(cl.rev(node) for node in bheads)
323 bheadset = set(cl.rev(node) for node in bheads)
310
324
311 # This has been tested True on all internal usages of this function.
325 # This has been tested True on all internal usages of this function.
312 # run it again in case of doubt
326 # run it again in case of doubt
313 # assert not (set(bheadrevs) & set(newheadrevs))
327 # assert not (set(bheadrevs) & set(newheadrevs))
314 bheadset.update(newheadrevs)
328 bheadset.update(newheadrevs)
315
329
316 # This prunes out two kinds of heads - heads that are superseded by
330 # This prunes out two kinds of heads - heads that are superseded by
317 # a head in newheadrevs, and newheadrevs that are not heads because
331 # a head in newheadrevs, and newheadrevs that are not heads because
318 # an existing head is their descendant.
332 # an existing head is their descendant.
319 uncertain = bheadset - topoheads
333 uncertain = bheadset - topoheads
320 if uncertain:
334 if uncertain:
321 floorrev = min(uncertain)
335 floorrev = min(uncertain)
322 ancestors = set(cl.ancestors(newheadrevs, floorrev))
336 ancestors = set(cl.ancestors(newheadrevs, floorrev))
323 bheadset -= ancestors
337 bheadset -= ancestors
324 bheadrevs = sorted(bheadset)
338 bheadrevs = sorted(bheadset)
325 self[branch] = [cl.node(rev) for rev in bheadrevs]
339 self[branch] = [cl.node(rev) for rev in bheadrevs]
326 tiprev = bheadrevs[-1]
340 tiprev = bheadrevs[-1]
327 if tiprev > self.tiprev:
341 if tiprev > self.tiprev:
328 self.tipnode = cl.node(tiprev)
342 self.tipnode = cl.node(tiprev)
329 self.tiprev = tiprev
343 self.tiprev = tiprev
330
344
331 if not self.validfor(repo):
345 if not self.validfor(repo):
332 # cache keys are not valid anymore
346 # cache keys are not valid anymore
333 self.tipnode = nullid
347 self.tipnode = nullid
334 self.tiprev = nullrev
348 self.tiprev = nullrev
335 for heads in self.values():
349 for heads in self.values():
336 tiprev = max(cl.rev(node) for node in heads)
350 tiprev = max(cl.rev(node) for node in heads)
337 if tiprev > self.tiprev:
351 if tiprev > self.tiprev:
338 self.tipnode = cl.node(tiprev)
352 self.tipnode = cl.node(tiprev)
339 self.tiprev = tiprev
353 self.tiprev = tiprev
340 self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
354 self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
341
355
342 duration = util.timer() - starttime
356 duration = util.timer() - starttime
343 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
357 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
344 repo.filtername, duration)
358 repo.filtername, duration)
345
359
346 self.write(repo)
360 self.write(repo)
347
361
348
362
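The head-pruning step inside update() can be read in isolation as the sketch below, over plain revision sets; prune_heads is a made-up name and ancestors stands in for cl.ancestors(revs, stoprev):

    def prune_heads(bheadset, newheadrevs, topoheads, ancestors):
        # Merge in candidate heads, then drop any candidate that is an
        # ancestor of a new head and therefore no longer a real head.
        bheadset.update(newheadrevs)
        uncertain = bheadset - topoheads
        if uncertain:
            floorrev = min(uncertain)
            bheadset -= set(ancestors(newheadrevs, floorrev))
        return sorted(bheadset)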
349 class remotebranchcache(branchcache):
363 class remotebranchcache(branchcache):
350 """Branchmap info for a remote connection, should not write locally"""
364 """Branchmap info for a remote connection, should not write locally"""
351 def write(self, repo):
365 def write(self, repo):
352 pass
366 pass
353
367
354
368
355 # Revision branch info cache
369 # Revision branch info cache
356
370
357 _rbcversion = '-v1'
371 _rbcversion = '-v1'
358 _rbcnames = 'rbc-names' + _rbcversion
372 _rbcnames = 'rbc-names' + _rbcversion
359 _rbcrevs = 'rbc-revs' + _rbcversion
373 _rbcrevs = 'rbc-revs' + _rbcversion
360 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
374 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
361 _rbcrecfmt = '>4sI'
375 _rbcrecfmt = '>4sI'
362 _rbcrecsize = calcsize(_rbcrecfmt)
376 _rbcrecsize = calcsize(_rbcrecfmt)
363 _rbcnodelen = 4
377 _rbcnodelen = 4
364 _rbcbranchidxmask = 0x7fffffff
378 _rbcbranchidxmask = 0x7fffffff
365 _rbccloseflag = 0x80000000
379 _rbccloseflag = 0x80000000
366
380
367 class revbranchcache(object):
381 class revbranchcache(object):
368 """Persistent cache, mapping from revision number to branch name and close.
382 """Persistent cache, mapping from revision number to branch name and close.
369 This is a low level cache, independent of filtering.
383 This is a low level cache, independent of filtering.
370
384
371 Branch names are stored in rbc-names in internal encoding separated by 0.
385 Branch names are stored in rbc-names in internal encoding separated by 0.
372 rbc-names is append-only, and each branch name is only stored once and will
386 rbc-names is append-only, and each branch name is only stored once and will
373 thus have a unique index.
387 thus have a unique index.
374
388
375 The branch info for each revision is stored in rbc-revs as constant size
389 The branch info for each revision is stored in rbc-revs as constant size
376 records. The whole file is read into memory, but it is only 'parsed' on
390 records. The whole file is read into memory, but it is only 'parsed' on
377 demand. The file is usually append-only but will be truncated if repo
391 demand. The file is usually append-only but will be truncated if repo
378 modification is detected.
392 modification is detected.
379 The record for each revision contains the first 4 bytes of the
393 The record for each revision contains the first 4 bytes of the
380 corresponding node hash, and the record is only used if it still matches.
394 corresponding node hash, and the record is only used if it still matches.
381 Even a completely trashed rbc-revs will thus still give the right result
395 Even a completely trashed rbc-revs will thus still give the right result
382 while converging towards full recovery ... assuming no incorrectly matching
396 while converging towards full recovery ... assuming no incorrectly matching
383 node hashes.
397 node hashes.
384 The record also contains 4 bytes where 31 bits contain the index of the
398 The record also contains 4 bytes where 31 bits contain the index of the
385 branch and the last bit indicates that it is a branch close commit.
399 branch and the last bit indicates that it is a branch close commit.
386 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
400 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
387 and will grow with it but be 1/8th of its size.
401 and will grow with it but be 1/8th of its size.
388 """
402 """
389
403
390 def __init__(self, repo, readonly=True):
404 def __init__(self, repo, readonly=True):
391 assert repo.filtername is None
405 assert repo.filtername is None
392 self._repo = repo
406 self._repo = repo
393 self._names = [] # branch names in local encoding with static index
407 self._names = [] # branch names in local encoding with static index
394 self._rbcrevs = bytearray()
408 self._rbcrevs = bytearray()
395 self._rbcsnameslen = 0 # length of rbc-names data read from disk
409 self._rbcsnameslen = 0 # length of rbc-names data read from disk
396 try:
410 try:
397 bndata = repo.cachevfs.read(_rbcnames)
411 bndata = repo.cachevfs.read(_rbcnames)
398 self._rbcsnameslen = len(bndata) # for verification before writing
412 self._rbcsnameslen = len(bndata) # for verification before writing
399 if bndata:
413 if bndata:
400 self._names = [encoding.tolocal(bn)
414 self._names = [encoding.tolocal(bn)
401 for bn in bndata.split('\0')]
415 for bn in bndata.split('\0')]
402 except (IOError, OSError):
416 except (IOError, OSError):
403 if readonly:
417 if readonly:
404 # don't try to use cache - fall back to the slow path
418 # don't try to use cache - fall back to the slow path
405 self.branchinfo = self._branchinfo
419 self.branchinfo = self._branchinfo
406
420
407 if self._names:
421 if self._names:
408 try:
422 try:
409 data = repo.cachevfs.read(_rbcrevs)
423 data = repo.cachevfs.read(_rbcrevs)
410 self._rbcrevs[:] = data
424 self._rbcrevs[:] = data
411 except (IOError, OSError) as inst:
425 except (IOError, OSError) as inst:
412 repo.ui.debug("couldn't read revision branch cache: %s\n" %
426 repo.ui.debug("couldn't read revision branch cache: %s\n" %
413 stringutil.forcebytestr(inst))
427 stringutil.forcebytestr(inst))
414 # remember number of good records on disk
428 # remember number of good records on disk
415 self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
429 self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
416 len(repo.changelog))
430 len(repo.changelog))
417 if self._rbcrevslen == 0:
431 if self._rbcrevslen == 0:
418 self._names = []
432 self._names = []
419 self._rbcnamescount = len(self._names) # number of names read at
433 self._rbcnamescount = len(self._names) # number of names read at
420 # _rbcsnameslen
434 # _rbcsnameslen
421
435
422 def _clear(self):
436 def _clear(self):
423 self._rbcsnameslen = 0
437 self._rbcsnameslen = 0
424 del self._names[:]
438 del self._names[:]
425 self._rbcnamescount = 0
439 self._rbcnamescount = 0
426 self._rbcrevslen = len(self._repo.changelog)
440 self._rbcrevslen = len(self._repo.changelog)
427 self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
441 self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
428 util.clearcachedproperty(self, '_namesreverse')
442 util.clearcachedproperty(self, '_namesreverse')
429
443
430 @util.propertycache
444 @util.propertycache
431 def _namesreverse(self):
445 def _namesreverse(self):
432 return dict((b, r) for r, b in enumerate(self._names))
446 return dict((b, r) for r, b in enumerate(self._names))
433
447
434 def branchinfo(self, rev):
448 def branchinfo(self, rev):
435 """Return branch name and close flag for rev, using and updating
449 """Return branch name and close flag for rev, using and updating
436 persistent cache."""
450 persistent cache."""
437 changelog = self._repo.changelog
451 changelog = self._repo.changelog
438 rbcrevidx = rev * _rbcrecsize
452 rbcrevidx = rev * _rbcrecsize
439
453
440 # avoid negative index, changelog.read(nullrev) is fast without cache
454 # avoid negative index, changelog.read(nullrev) is fast without cache
441 if rev == nullrev:
455 if rev == nullrev:
442 return changelog.branchinfo(rev)
456 return changelog.branchinfo(rev)
443
457
444 # if requested rev isn't allocated, grow and cache the rev info
458 # if requested rev isn't allocated, grow and cache the rev info
445 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
459 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
446 return self._branchinfo(rev)
460 return self._branchinfo(rev)
447
461
448 # fast path: extract data from cache, use it if node is matching
462 # fast path: extract data from cache, use it if node is matching
449 reponode = changelog.node(rev)[:_rbcnodelen]
463 reponode = changelog.node(rev)[:_rbcnodelen]
450 cachenode, branchidx = unpack_from(
464 cachenode, branchidx = unpack_from(
451 _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
465 _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
452 close = bool(branchidx & _rbccloseflag)
466 close = bool(branchidx & _rbccloseflag)
453 if close:
467 if close:
454 branchidx &= _rbcbranchidxmask
468 branchidx &= _rbcbranchidxmask
455 if cachenode == '\0\0\0\0':
469 if cachenode == '\0\0\0\0':
456 pass
470 pass
457 elif cachenode == reponode:
471 elif cachenode == reponode:
458 try:
472 try:
459 return self._names[branchidx], close
473 return self._names[branchidx], close
460 except IndexError:
474 except IndexError:
461 # recover from invalid reference to unknown branch
475 # recover from invalid reference to unknown branch
462 self._repo.ui.debug("referenced branch names not found"
476 self._repo.ui.debug("referenced branch names not found"
463 " - rebuilding revision branch cache from scratch\n")
477 " - rebuilding revision branch cache from scratch\n")
464 self._clear()
478 self._clear()
465 else:
479 else:
466 # rev/node map has changed, invalidate the cache from here up
480 # rev/node map has changed, invalidate the cache from here up
467 self._repo.ui.debug("history modification detected - truncating "
481 self._repo.ui.debug("history modification detected - truncating "
468 "revision branch cache to revision %d\n" % rev)
482 "revision branch cache to revision %d\n" % rev)
469 truncate = rbcrevidx + _rbcrecsize
483 truncate = rbcrevidx + _rbcrecsize
470 del self._rbcrevs[truncate:]
484 del self._rbcrevs[truncate:]
471 self._rbcrevslen = min(self._rbcrevslen, truncate)
485 self._rbcrevslen = min(self._rbcrevslen, truncate)
472
486
473 # fall back to slow path and make sure it will be written to disk
487 # fall back to slow path and make sure it will be written to disk
474 return self._branchinfo(rev)
488 return self._branchinfo(rev)
475
489
476 def _branchinfo(self, rev):
490 def _branchinfo(self, rev):
477 """Retrieve branch info from changelog and update _rbcrevs"""
491 """Retrieve branch info from changelog and update _rbcrevs"""
478 changelog = self._repo.changelog
492 changelog = self._repo.changelog
479 b, close = changelog.branchinfo(rev)
493 b, close = changelog.branchinfo(rev)
480 if b in self._namesreverse:
494 if b in self._namesreverse:
481 branchidx = self._namesreverse[b]
495 branchidx = self._namesreverse[b]
482 else:
496 else:
483 branchidx = len(self._names)
497 branchidx = len(self._names)
484 self._names.append(b)
498 self._names.append(b)
485 self._namesreverse[b] = branchidx
499 self._namesreverse[b] = branchidx
486 reponode = changelog.node(rev)
500 reponode = changelog.node(rev)
487 if close:
501 if close:
488 branchidx |= _rbccloseflag
502 branchidx |= _rbccloseflag
489 self._setcachedata(rev, reponode, branchidx)
503 self._setcachedata(rev, reponode, branchidx)
490 return b, close
504 return b, close
491
505
492 def setdata(self, branch, rev, node, close):
506 def setdata(self, branch, rev, node, close):
493 """add new data information to the cache"""
507 """add new data information to the cache"""
494 if branch in self._namesreverse:
508 if branch in self._namesreverse:
495 branchidx = self._namesreverse[branch]
509 branchidx = self._namesreverse[branch]
496 else:
510 else:
497 branchidx = len(self._names)
511 branchidx = len(self._names)
498 self._names.append(branch)
512 self._names.append(branch)
499 self._namesreverse[branch] = branchidx
513 self._namesreverse[branch] = branchidx
500 if close:
514 if close:
501 branchidx |= _rbccloseflag
515 branchidx |= _rbccloseflag
502 self._setcachedata(rev, node, branchidx)
516 self._setcachedata(rev, node, branchidx)
503 # If no cache data were readable (none exists, bad permissions, etc.)
517 # If no cache data were readable (none exists, bad permissions, etc.)
504 # the cache was bypassing itself by setting:
518 # the cache was bypassing itself by setting:
505 #
519 #
506 # self.branchinfo = self._branchinfo
520 # self.branchinfo = self._branchinfo
507 #
521 #
508 # Since we now have data in the cache, we need to drop this bypassing.
522 # Since we now have data in the cache, we need to drop this bypassing.
509 if r'branchinfo' in vars(self):
523 if r'branchinfo' in vars(self):
510 del self.branchinfo
524 del self.branchinfo
511
525
512 def _setcachedata(self, rev, node, branchidx):
526 def _setcachedata(self, rev, node, branchidx):
513 """Writes the node's branch data to the in-memory cache data."""
527 """Writes the node's branch data to the in-memory cache data."""
514 if rev == nullrev:
528 if rev == nullrev:
515 return
529 return
516 rbcrevidx = rev * _rbcrecsize
530 rbcrevidx = rev * _rbcrecsize
517 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
531 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
518 self._rbcrevs.extend('\0' *
532 self._rbcrevs.extend('\0' *
519 (len(self._repo.changelog) * _rbcrecsize -
533 (len(self._repo.changelog) * _rbcrecsize -
520 len(self._rbcrevs)))
534 len(self._rbcrevs)))
521 pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
535 pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
522 self._rbcrevslen = min(self._rbcrevslen, rev)
536 self._rbcrevslen = min(self._rbcrevslen, rev)
523
537
524 tr = self._repo.currenttransaction()
538 tr = self._repo.currenttransaction()
525 if tr:
539 if tr:
526 tr.addfinalize('write-revbranchcache', self.write)
540 tr.addfinalize('write-revbranchcache', self.write)
527
541
528 def write(self, tr=None):
542 def write(self, tr=None):
529 """Save branch cache if it is dirty."""
543 """Save branch cache if it is dirty."""
530 repo = self._repo
544 repo = self._repo
531 wlock = None
545 wlock = None
532 step = ''
546 step = ''
533 try:
547 try:
534 if self._rbcnamescount < len(self._names):
548 if self._rbcnamescount < len(self._names):
535 step = ' names'
549 step = ' names'
536 wlock = repo.wlock(wait=False)
550 wlock = repo.wlock(wait=False)
537 if self._rbcnamescount != 0:
551 if self._rbcnamescount != 0:
538 f = repo.cachevfs.open(_rbcnames, 'ab')
552 f = repo.cachevfs.open(_rbcnames, 'ab')
539 if f.tell() == self._rbcsnameslen:
553 if f.tell() == self._rbcsnameslen:
540 f.write('\0')
554 f.write('\0')
541 else:
555 else:
542 f.close()
556 f.close()
543 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
557 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
544 self._rbcnamescount = 0
558 self._rbcnamescount = 0
545 self._rbcrevslen = 0
559 self._rbcrevslen = 0
546 if self._rbcnamescount == 0:
560 if self._rbcnamescount == 0:
547 # before rewriting names, make sure references are removed
561 # before rewriting names, make sure references are removed
548 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
562 repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
549 f = repo.cachevfs.open(_rbcnames, 'wb')
563 f = repo.cachevfs.open(_rbcnames, 'wb')
550 f.write('\0'.join(encoding.fromlocal(b)
564 f.write('\0'.join(encoding.fromlocal(b)
551 for b in self._names[self._rbcnamescount:]))
565 for b in self._names[self._rbcnamescount:]))
552 self._rbcsnameslen = f.tell()
566 self._rbcsnameslen = f.tell()
553 f.close()
567 f.close()
554 self._rbcnamescount = len(self._names)
568 self._rbcnamescount = len(self._names)
555
569
556 start = self._rbcrevslen * _rbcrecsize
570 start = self._rbcrevslen * _rbcrecsize
557 if start != len(self._rbcrevs):
571 if start != len(self._rbcrevs):
558 step = ''
572 step = ''
559 if wlock is None:
573 if wlock is None:
560 wlock = repo.wlock(wait=False)
574 wlock = repo.wlock(wait=False)
561 revs = min(len(repo.changelog),
575 revs = min(len(repo.changelog),
562 len(self._rbcrevs) // _rbcrecsize)
576 len(self._rbcrevs) // _rbcrecsize)
563 f = repo.cachevfs.open(_rbcrevs, 'ab')
577 f = repo.cachevfs.open(_rbcrevs, 'ab')
564 if f.tell() != start:
578 if f.tell() != start:
565 repo.ui.debug("truncating cache/%s to %d\n"
579 repo.ui.debug("truncating cache/%s to %d\n"
566 % (_rbcrevs, start))
580 % (_rbcrevs, start))
567 f.seek(start)
581 f.seek(start)
568 if f.tell() != start:
582 if f.tell() != start:
569 start = 0
583 start = 0
570 f.seek(start)
584 f.seek(start)
571 f.truncate()
585 f.truncate()
572 end = revs * _rbcrecsize
586 end = revs * _rbcrecsize
573 f.write(self._rbcrevs[start:end])
587 f.write(self._rbcrevs[start:end])
574 f.close()
588 f.close()
575 self._rbcrevslen = revs
589 self._rbcrevslen = revs
576 except (IOError, OSError, error.Abort, error.LockError) as inst:
590 except (IOError, OSError, error.Abort, error.LockError) as inst:
577 repo.ui.debug("couldn't write revision branch cache%s: %s\n"
591 repo.ui.debug("couldn't write revision branch cache%s: %s\n"
578 % (step, stringutil.forcebytestr(inst)))
592 % (step, stringutil.forcebytestr(inst)))
579 finally:
593 finally:
580 if wlock is not None:
594 if wlock is not None:
581 wlock.release()
595 wlock.release()
@@ -1,3073 +1,3072 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 )
25 )
26 from . import (
26 from . import (
27 bookmarks,
27 bookmarks,
28 branchmap,
28 branchmap,
29 bundle2,
29 bundle2,
30 changegroup,
30 changegroup,
31 changelog,
31 changelog,
32 color,
32 color,
33 context,
33 context,
34 dirstate,
34 dirstate,
35 dirstateguard,
35 dirstateguard,
36 discovery,
36 discovery,
37 encoding,
37 encoding,
38 error,
38 error,
39 exchange,
39 exchange,
40 extensions,
40 extensions,
41 filelog,
41 filelog,
42 hook,
42 hook,
43 lock as lockmod,
43 lock as lockmod,
44 manifest,
44 manifest,
45 match as matchmod,
45 match as matchmod,
46 merge as mergemod,
46 merge as mergemod,
47 mergeutil,
47 mergeutil,
48 namespaces,
48 namespaces,
49 narrowspec,
49 narrowspec,
50 obsolete,
50 obsolete,
51 pathutil,
51 pathutil,
52 phases,
52 phases,
53 pushkey,
53 pushkey,
54 pycompat,
54 pycompat,
55 repository,
55 repository,
56 repoview,
56 repoview,
57 revset,
57 revset,
58 revsetlang,
58 revsetlang,
59 scmutil,
59 scmutil,
60 sparse,
60 sparse,
61 store as storemod,
61 store as storemod,
62 subrepoutil,
62 subrepoutil,
63 tags as tagsmod,
63 tags as tagsmod,
64 transaction,
64 transaction,
65 txnutil,
65 txnutil,
66 util,
66 util,
67 vfs as vfsmod,
67 vfs as vfsmod,
68 )
68 )
69 from .utils import (
69 from .utils import (
70 interfaceutil,
70 interfaceutil,
71 procutil,
71 procutil,
72 stringutil,
72 stringutil,
73 )
73 )
74
74
75 from .revlogutils import (
75 from .revlogutils import (
76 constants as revlogconst,
76 constants as revlogconst,
77 )
77 )
78
78
79 release = lockmod.release
79 release = lockmod.release
80 urlerr = util.urlerr
80 urlerr = util.urlerr
81 urlreq = util.urlreq
81 urlreq = util.urlreq
82
82
83 # set of (path, vfs-location) tuples. vfs-location is:
83 # set of (path, vfs-location) tuples. vfs-location is:
84 # - 'plain' for vfs relative paths
84 # - 'plain' for vfs relative paths
85 # - '' for svfs relative paths
85 # - '' for svfs relative paths
86 _cachedfiles = set()
86 _cachedfiles = set()
87
87
88 class _basefilecache(scmutil.filecache):
88 class _basefilecache(scmutil.filecache):
89 """All filecache usage on repo are done for logic that should be unfiltered
89 """All filecache usage on repo are done for logic that should be unfiltered
90 """
90 """
91 def __get__(self, repo, type=None):
91 def __get__(self, repo, type=None):
92 if repo is None:
92 if repo is None:
93 return self
93 return self
94 # proxy to unfiltered __dict__ since filtered repo has no entry
94 # proxy to unfiltered __dict__ since filtered repo has no entry
95 unfi = repo.unfiltered()
95 unfi = repo.unfiltered()
96 try:
96 try:
97 return unfi.__dict__[self.sname]
97 return unfi.__dict__[self.sname]
98 except KeyError:
98 except KeyError:
99 pass
99 pass
100 return super(_basefilecache, self).__get__(unfi, type)
100 return super(_basefilecache, self).__get__(unfi, type)
101
101
102 def set(self, repo, value):
102 def set(self, repo, value):
103 return super(_basefilecache, self).set(repo.unfiltered(), value)
103 return super(_basefilecache, self).set(repo.unfiltered(), value)
104
104
105 class repofilecache(_basefilecache):
105 class repofilecache(_basefilecache):
106 """filecache for files in .hg but outside of .hg/store"""
106 """filecache for files in .hg but outside of .hg/store"""
107 def __init__(self, *paths):
107 def __init__(self, *paths):
108 super(repofilecache, self).__init__(*paths)
108 super(repofilecache, self).__init__(*paths)
109 for path in paths:
109 for path in paths:
110 _cachedfiles.add((path, 'plain'))
110 _cachedfiles.add((path, 'plain'))
111
111
112 def join(self, obj, fname):
112 def join(self, obj, fname):
113 return obj.vfs.join(fname)
113 return obj.vfs.join(fname)
114
114
115 class storecache(_basefilecache):
115 class storecache(_basefilecache):
116 """filecache for files in the store"""
116 """filecache for files in the store"""
117 def __init__(self, *paths):
117 def __init__(self, *paths):
118 super(storecache, self).__init__(*paths)
118 super(storecache, self).__init__(*paths)
119 for path in paths:
119 for path in paths:
120 _cachedfiles.add((path, ''))
120 _cachedfiles.add((path, ''))
121
121
122 def join(self, obj, fname):
122 def join(self, obj, fname):
123 return obj.sjoin(fname)
123 return obj.sjoin(fname)
124
124
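As a usage sketch, such a descriptor is typically declared on the repository class like below (mirroring how localrepository uses it later in this file; the property shown is illustrative, not part of this hunk):

    @repofilecache('bookmarks')
    def _bookmarks(self):
        # recomputed only when .hg/bookmarks changes on disk
        return bookmarks.bmstore(self)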
125 def isfilecached(repo, name):
125 def isfilecached(repo, name):
126 """check if a repo has already cached "name" filecache-ed property
126 """check if a repo has already cached "name" filecache-ed property
127
127
128 This returns (cachedobj-or-None, iscached) tuple.
128 This returns (cachedobj-or-None, iscached) tuple.
129 """
129 """
130 cacheentry = repo.unfiltered()._filecache.get(name, None)
130 cacheentry = repo.unfiltered()._filecache.get(name, None)
131 if not cacheentry:
131 if not cacheentry:
132 return None, False
132 return None, False
133 return cacheentry.obj, True
133 return cacheentry.obj, True
134
134
135 class unfilteredpropertycache(util.propertycache):
135 class unfilteredpropertycache(util.propertycache):
136 """propertycache that apply to unfiltered repo only"""
136 """propertycache that apply to unfiltered repo only"""
137
137
138 def __get__(self, repo, type=None):
138 def __get__(self, repo, type=None):
139 unfi = repo.unfiltered()
139 unfi = repo.unfiltered()
140 if unfi is repo:
140 if unfi is repo:
141 return super(unfilteredpropertycache, self).__get__(unfi)
141 return super(unfilteredpropertycache, self).__get__(unfi)
142 return getattr(unfi, self.name)
142 return getattr(unfi, self.name)
143
143
144 class filteredpropertycache(util.propertycache):
144 class filteredpropertycache(util.propertycache):
145 """propertycache that must take filtering in account"""
145 """propertycache that must take filtering in account"""
146
146
147 def cachevalue(self, obj, value):
147 def cachevalue(self, obj, value):
148 object.__setattr__(obj, self.name, value)
148 object.__setattr__(obj, self.name, value)
149
149
150
150
151 def hasunfilteredcache(repo, name):
151 def hasunfilteredcache(repo, name):
152 """check if a repo has an unfilteredpropertycache value for <name>"""
152 """check if a repo has an unfilteredpropertycache value for <name>"""
153 return name in vars(repo.unfiltered())
153 return name in vars(repo.unfiltered())
154
154
155 def unfilteredmethod(orig):
155 def unfilteredmethod(orig):
156 """decorate method that always need to be run on unfiltered version"""
156 """decorate method that always need to be run on unfiltered version"""
157 def wrapper(repo, *args, **kwargs):
157 def wrapper(repo, *args, **kwargs):
158 return orig(repo.unfiltered(), *args, **kwargs)
158 return orig(repo.unfiltered(), *args, **kwargs)
159 return wrapper
159 return wrapper
160
160
161 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
161 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
162 'unbundle'}
162 'unbundle'}
163 legacycaps = moderncaps.union({'changegroupsubset'})
163 legacycaps = moderncaps.union({'changegroupsubset'})
164
164
165 @interfaceutil.implementer(repository.ipeercommandexecutor)
165 @interfaceutil.implementer(repository.ipeercommandexecutor)
166 class localcommandexecutor(object):
166 class localcommandexecutor(object):
167 def __init__(self, peer):
167 def __init__(self, peer):
168 self._peer = peer
168 self._peer = peer
169 self._sent = False
169 self._sent = False
170 self._closed = False
170 self._closed = False
171
171
172 def __enter__(self):
172 def __enter__(self):
173 return self
173 return self
174
174
175 def __exit__(self, exctype, excvalue, exctb):
175 def __exit__(self, exctype, excvalue, exctb):
176 self.close()
176 self.close()
177
177
178 def callcommand(self, command, args):
178 def callcommand(self, command, args):
179 if self._sent:
179 if self._sent:
180 raise error.ProgrammingError('callcommand() cannot be used after '
180 raise error.ProgrammingError('callcommand() cannot be used after '
181 'sendcommands()')
181 'sendcommands()')
182
182
183 if self._closed:
183 if self._closed:
184 raise error.ProgrammingError('callcommand() cannot be used after '
184 raise error.ProgrammingError('callcommand() cannot be used after '
185 'close()')
185 'close()')
186
186
187 # We don't need to support anything fancy. Just call the named
187 # We don't need to support anything fancy. Just call the named
188 # method on the peer and return a resolved future.
188 # method on the peer and return a resolved future.
189 fn = getattr(self._peer, pycompat.sysstr(command))
189 fn = getattr(self._peer, pycompat.sysstr(command))
190
190
191 f = pycompat.futures.Future()
191 f = pycompat.futures.Future()
192
192
193 try:
193 try:
194 result = fn(**pycompat.strkwargs(args))
194 result = fn(**pycompat.strkwargs(args))
195 except Exception:
195 except Exception:
196 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
196 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
197 else:
197 else:
198 f.set_result(result)
198 f.set_result(result)
199
199
200 return f
200 return f
201
201
202 def sendcommands(self):
202 def sendcommands(self):
203 self._sent = True
203 self._sent = True
204
204
205 def close(self):
205 def close(self):
206 self._closed = True
206 self._closed = True
207
207
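A hypothetical usage sketch of the executor protocol above ('peer' is assumed to be a localpeer instance):

    with peer.commandexecutor() as e:
        fheads = e.callcommand(b'heads', {})
        e.sendcommands()
    print(fheads.result())   # the future is already resolved for a local peer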
208 @interfaceutil.implementer(repository.ipeercommands)
208 @interfaceutil.implementer(repository.ipeercommands)
209 class localpeer(repository.peer):
209 class localpeer(repository.peer):
210 '''peer for a local repo; reflects only the most recent API'''
210 '''peer for a local repo; reflects only the most recent API'''
211
211
212 def __init__(self, repo, caps=None):
212 def __init__(self, repo, caps=None):
213 super(localpeer, self).__init__()
213 super(localpeer, self).__init__()
214
214
215 if caps is None:
215 if caps is None:
216 caps = moderncaps.copy()
216 caps = moderncaps.copy()
217 self._repo = repo.filtered('served')
217 self._repo = repo.filtered('served')
218 self.ui = repo.ui
218 self.ui = repo.ui
219 self._caps = repo._restrictcapabilities(caps)
219 self._caps = repo._restrictcapabilities(caps)
220
220
221 # Begin of _basepeer interface.
221 # Begin of _basepeer interface.
222
222
223 def url(self):
223 def url(self):
224 return self._repo.url()
224 return self._repo.url()
225
225
226 def local(self):
226 def local(self):
227 return self._repo
227 return self._repo
228
228
229 def peer(self):
229 def peer(self):
230 return self
230 return self
231
231
232 def canpush(self):
232 def canpush(self):
233 return True
233 return True
234
234
235 def close(self):
235 def close(self):
236 self._repo.close()
236 self._repo.close()
237
237
238 # End of _basepeer interface.
238 # End of _basepeer interface.
239
239
    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common,
                                          bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make
            # the wire-level function happier. We need to build a proper
            # object from it in the local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push',
                                        url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when
                    # the API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant, but it allows a "simple" solution
                # for issue4594.
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the
# chain. This is why, once a repository has enabled sparse-read, it becomes
# required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

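# Illustrative sketch (editorial, not part of the original file): how an
# extension might register a feature setup function. The extension name and
# the ``exp-myfeature`` requirement are hypothetical.
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         # advertise that this extension can open repos with the feature
#         supported.add(b'exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
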
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository
    type creation. Note that an extension's wrapped function may be called
    even if that extension is not loaded for the repo being constructed.
    Extensions should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # The .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate that the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories
    # with shared storage. If storage is shared, we should also test for
    # requirements compatibility in the pointed-to repo. This entails
    # loading the .hg/hgrc in that repo, as that repo may load extensions
    # needed to open it. This is a bit complicated because we don't want
    # the other hgrc to overwrite settings in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the
    # .hg/requires file when sharing repos. But if a requirement is added
    # after the share is performed, thereby introducing a new requirement
    # for the opener, we may not see that and could encounter a run-time
    # error interacting with that shared store since it has an
    # unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path
    # contained in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working
    # copy.
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by
    # repeatedly calling a factory function to produce types for specific
    # aspects of the repo's operation. The aggregate returned types are used
    # as base classes for a dynamically-derived type, which will represent
    # our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

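# Illustration (editorial, not part of the original file): for a hypothetical
# repository at /home/user/repo with a common set of requirements, the
# derived type name built above would look like:
#
#   derivedrepo:/home/user/repo<dotencode,fncache,generaldelta,revlogv1,store>
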
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

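# Illustrative sketch (editorial): how an extension might monkeypatch
# ``loadhgrc`` to pull in an extra, hypothetical config file. The wrapper
# signature follows ``extensions.wrapfunction`` conventions.
#
#     from mercurial import extensions, localrepo
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         try:
#             ui.readconfig(hgvfs.join(b'myext-hgrc'), root=wdirvfs.base)
#             loaded = True
#         except IOError:
#             pass
#         return loaded
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
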
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to the list of extensions to load automatically
    # when the requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

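# Example (editorial note): with the compression engines bundled in this era
# of Mercurial, the loop above typically yields a derived requirement such
# as ``exp-compression-zstd`` for any engine that defines a revlog header;
# the exact set depends on the engines registered in this build.
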
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or may require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

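# Summary (editorial) of the dispatch above:
#
#   requirements contain           store object constructed
#   ------------------------       ------------------------------------------
#   'store' and 'fncache'          storemod.fncachestore ('dotencode' honored)
#   'store' only                   storemod.encodedstore
#   neither (very old repos)       storemod.basicstore
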
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements,
                                                    features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

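# Illustration (editorial): for a repository with requirements
# {'revlogv1', 'generaldelta', 'sparserevlog'} and default configuration,
# the resolved options would include, among others:
#
#   {b'revlogv1': True, b'generaldelta': True, b'sparse-revlog': True,
#    b'lazydeltabase': ..., b'with-sparse-read': ..., ...}
#
# Exact values depend on the active configuration.
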
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

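# Illustrative sketch (editorial): because ``REPO_INTERFACES`` resolves the
# factory functions lazily through lambdas, an extension can wrap them at
# module level. The extension and replacement storage type are hypothetical.
#
#     from mercurial import extensions, localrepo
#
#     def wrapfilestorage(orig, requirements, features, **kwargs):
#         typ = orig(requirements, features, **kwargs)
#         if b'exp-mystorage' in requirements:
#             return mycustomfilestorage  # hypothetical replacement type
#         return typ
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makefilestorage',
#                                 wrapfilestorage)
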
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

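    # Typical usage (editorial illustration): obtain a repository through a
    # supported entry point instead of instantiating this class directly.
    # The path below is hypothetical.
    #
    #     from mercurial import hg, ui as uimod
    #     repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
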
    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. The experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase',
    }

    # List of prefixes of files that can be written without 'wlock'.
    # Extensions should extend this list when needed.
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

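    # Illustrative sketch (editorial): an extension that writes its own
    # cache file under .hg/ without taking the wlock could exempt it here.
    # The file name is hypothetical.
    #
    #     from mercurial import localrepo
    #
    #     def uisetup(ui):
    #         localrepo.localrepository._wlockfreeprefix.add('myext-cache')
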
    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 wcachevfs, features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that the ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening
           requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that
           we know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator).
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # Holds sets of revisions to be filtered. This should be cleared
        # when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

1027 def _getvfsward(self, origfunc):
1027 def _getvfsward(self, origfunc):
1028 """build a ward for self.vfs"""
1028 """build a ward for self.vfs"""
1029 rref = weakref.ref(self)
1029 rref = weakref.ref(self)
1030 def checkvfs(path, mode=None):
1030 def checkvfs(path, mode=None):
1031 ret = origfunc(path, mode=mode)
1031 ret = origfunc(path, mode=mode)
1032 repo = rref()
1032 repo = rref()
1033 if (repo is None
1033 if (repo is None
1034 or not util.safehasattr(repo, '_wlockref')
1034 or not util.safehasattr(repo, '_wlockref')
1035 or not util.safehasattr(repo, '_lockref')):
1035 or not util.safehasattr(repo, '_lockref')):
1036 return
1036 return
1037 if mode in (None, 'r', 'rb'):
1037 if mode in (None, 'r', 'rb'):
1038 return
1038 return
1039 if path.startswith(repo.path):
1039 if path.startswith(repo.path):
1040 # truncate name relative to the repository (.hg)
1040 # truncate name relative to the repository (.hg)
1041 path = path[len(repo.path) + 1:]
1041 path = path[len(repo.path) + 1:]
1042 if path.startswith('cache/'):
1042 if path.startswith('cache/'):
1043 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1043 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1044 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1044 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1045 if path.startswith('journal.') or path.startswith('undo.'):
1045 if path.startswith('journal.') or path.startswith('undo.'):
1046 # journal is covered by 'lock'
1046 # journal is covered by 'lock'
1047 if repo._currentlock(repo._lockref) is None:
1047 if repo._currentlock(repo._lockref) is None:
1048 repo.ui.develwarn('write with no lock: "%s"' % path,
1048 repo.ui.develwarn('write with no lock: "%s"' % path,
1049 stacklevel=3, config='check-locks')
1049 stacklevel=3, config='check-locks')
1050 elif repo._currentlock(repo._wlockref) is None:
1050 elif repo._currentlock(repo._wlockref) is None:
1051 # rest of vfs files are covered by 'wlock'
1051 # rest of vfs files are covered by 'wlock'
1052 #
1052 #
1053 # exclude special files
1053 # exclude special files
1054 for prefix in self._wlockfreeprefix:
1054 for prefix in self._wlockfreeprefix:
1055 if path.startswith(prefix):
1055 if path.startswith(prefix):
1056 return
1056 return
1057 repo.ui.develwarn('write with no wlock: "%s"' % path,
1057 repo.ui.develwarn('write with no wlock: "%s"' % path,
1058 stacklevel=3, config='check-locks')
1058 stacklevel=3, config='check-locks')
1059 return ret
1059 return ret
1060 return checkvfs
1060 return checkvfs
1061
1061
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

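    # Both wards above follow the same pattern: wrap a vfs open function,
    # hold the repo through a weakref to avoid a reference cycle, and emit
    # develwarn() diagnostics for writes done without the appropriate lock.
    # A minimal sketch of that shape (illustrative only; ``makeward`` is a
    # hypothetical name, not part of this module):
    #
    #     def makeward(repo, origfunc):
    #         rref = weakref.ref(repo)
    #         def ward(path, mode=None):
    #             ret = origfunc(path, mode=mode)
    #             repo = rref()
    #             if repo is not None and mode not in (None, 'r', 'rb'):
    #                 repo.ui.develwarn('unguarded write: "%s"' % path)
    #             return ret
    #         return ward
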
    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

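    # A small sketch of the prefix walk above (``_nestedprefixes`` is a
    # hypothetical helper, not part of this module): for 'sub/dir/x.txt' it
    # yields 'sub/dir/x.txt', then 'sub/dir', then 'sub', so the deepest
    # path registered in ctx.substate wins.
    def _nestedprefixes(self, subpath):
        parts = util.splitpath(subpath)
        prefixes = []
        while parts:
            prefixes.append('/'.join(parts))
            parts.pop()
        return prefixes
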
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match._root, match._cwd, match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

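    # A minimal usage sketch (``_narrowmatchexample`` is a hypothetical
    # helper; ``m`` is assumed to be a matcher built elsewhere, e.g. by
    # scmutil.match): restrict a caller-supplied matcher to the narrowspec
    # while keeping explicitly named paths visible for later warnings, as
    # described in the docstring above.
    def _narrowmatchexample(self, m):
        return self.narrowmatch(m, includeexact=True)
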
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)

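    # The accepted changeid forms, as a sketch (``_getitemexamples`` is a
    # hypothetical helper, not part of this module):
    def _getitemexamples(self):
        wctx = self[None]       # working directory context
        tipctx = self['tip']    # symbolic identifier
        first = self[0]         # integer revision
        nullctx = self['null']  # the null revision
        return wctx, tipctx, first, nullctx
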
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

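    # A usage sketch for the %-formatting described above (hypothetical
    # helper, not part of this module): %d safely escapes an integer
    # revision, so the caller does not need to quote anything itself.
    def _revsexample(self):
        return [r for r in self.revs('%d::%d', 0, 2)]
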
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

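    # A sketch of the ``localalias`` override described above (hypothetical
    # helper; the alias name and definition are arbitrary): 'tipalias'
    # resolves through the local definition even if the user configured an
    # alias of the same name.
    def _anyrevsexample(self):
        aliases = {'tipalias': 'tip'}
        return self.anyrevs(['tipalias'], user=True, localalias=aliases)
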
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # this changeset drops the explicit branchmap.updatecache(self) call
        # and the self._branchcaches[self.filtername] lookup: cache updating
        # is now encapsulated in the map itself
        return self._branchcaches[self]

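    # The lookup above relies on self._branchcaches being mapping-like, with
    # cache validation and refreshing encapsulated in its __getitem__. A
    # rough sketch of that shape (illustrative only; the real class lives in
    # the branchmap module and differs in detail, and ``_rebuild`` is a
    # hypothetical name):
    #
    #     class branchmapcache(object):
    #         def __init__(self):
    #             self._per_filter = {}
    #         def __getitem__(self, repo):
    #             cache = self._per_filter.get(repo.filtername)
    #             if cache is None or not cache.validfor(repo):
    #                 cache = self._rebuild(repo)  # refresh lazily
    #                 self._per_filter[repo.filtername] = cache
    #             return cache
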
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

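    # Usage sketch for ``ignoremissing`` (hypothetical helper, not part of
    # this module): falls through to an implicit None instead of raising
    # RepoLookupError for an unknown branch name.
    def _branchtipexample(self):
        return self.branchtip('no-such-branch', ignoremissing=True)
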
    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

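    # The [encode]/[decode] configuration this reads looks like the
    # following hgrc sketch (illustrative values, adapted from the hgrc
    # documentation; '!' disables a pattern, as handled above):
    #
    #     [encode]
    #     # uncompress gzip files on checkin to improve delta compression
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     # recompress gzip files when writing them to the working dir
    #     *.gz = gzip
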
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

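    # Flag handling sketch (hypothetical helper, not part of this module):
    # '' writes a plain file, 'x' sets the executable bit, and 'l' writes a
    # symlink whose target is ``data``, mirroring the branches above.
    def _wwriteexamples(self, data):
        self.wwrite('plain.txt', data, '')    # regular file
        self.wwrite('script.sh', data, 'x')   # executable
        self.wwrite('link', data, 'l')        # symlink pointing at ``data``
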
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook, since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when closing the transaction if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
1897 # Include a leading "-" to make it happen before the transaction summary
1896 # Include a leading "-" to make it happen before the transaction summary
1898 # reports registered via scmutil.registersummarycallback() whose names
1897 # reports registered via scmutil.registersummarycallback() whose names
1899 # are 00-txnreport etc. That way, the caches will be warm when the
1898 # are 00-txnreport etc. That way, the caches will be warm when the
1900 # callbacks run.
1899 # callbacks run.
1901 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1900 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1902 def txnaborthook(tr2):
1901 def txnaborthook(tr2):
1903 """To be run if transaction is aborted
1902 """To be run if transaction is aborted
1904 """
1903 """
1905 reporef().hook('txnabort', throw=False, txnname=desc,
1904 reporef().hook('txnabort', throw=False, txnname=desc,
1906 **pycompat.strkwargs(tr2.hookargs))
1905 **pycompat.strkwargs(tr2.hookargs))
1907 tr.addabort('txnabort-hook', txnaborthook)
1906 tr.addabort('txnabort-hook', txnaborthook)
1908 # avoid eager cache invalidation. in-memory data should be identical
1907 # avoid eager cache invalidation. in-memory data should be identical
1909 # to stored data if the transaction has no error.
1908 # to stored data if the transaction has no error.
1910 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1909 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1911 self._transref = weakref.ref(tr)
1910 self._transref = weakref.ref(tr)
1912 scmutil.registersummarycallback(self, tr, desc)
1911 scmutil.registersummarycallback(self, tr, desc)
1913 return tr
1912 return tr
1914
1913
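# Editor's sketch: a minimal in-process consumer of the 'txnclose' hook
# scheduled by txnclosehook above. The hook point and the 'txnid' hookarg
# come from the code above; the module name, hgrc stanza and function are
# hypothetical illustrations, not part of localrepo.py.
#
#   [hooks]
#   txnclose.log = python:myhooks.logtxn

def logtxn(ui, repo, hooktype, **kwargs):
    # kwargs carries tr.hookargs, including the 'txnid' set above
    ui.status('transaction %s closed in %s\n'
              % (kwargs.get('txnid'), repo.root))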
1915 def _journalfiles(self):
1914 def _journalfiles(self):
1916 return ((self.svfs, 'journal'),
1915 return ((self.svfs, 'journal'),
1917 (self.svfs, 'journal.narrowspec'),
1916 (self.svfs, 'journal.narrowspec'),
1918 (self.vfs, 'journal.narrowspec.dirstate'),
1917 (self.vfs, 'journal.narrowspec.dirstate'),
1919 (self.vfs, 'journal.dirstate'),
1918 (self.vfs, 'journal.dirstate'),
1920 (self.vfs, 'journal.branch'),
1919 (self.vfs, 'journal.branch'),
1921 (self.vfs, 'journal.desc'),
1920 (self.vfs, 'journal.desc'),
1922 (self.vfs, 'journal.bookmarks'),
1921 (self.vfs, 'journal.bookmarks'),
1923 (self.svfs, 'journal.phaseroots'))
1922 (self.svfs, 'journal.phaseroots'))
1924
1923
1925 def undofiles(self):
1924 def undofiles(self):
1926 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1925 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1927
1926
1928 @unfilteredmethod
1927 @unfilteredmethod
1929 def _writejournal(self, desc):
1928 def _writejournal(self, desc):
1930 self.dirstate.savebackup(None, 'journal.dirstate')
1929 self.dirstate.savebackup(None, 'journal.dirstate')
1931 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
1930 narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
1932 narrowspec.savebackup(self, 'journal.narrowspec')
1931 narrowspec.savebackup(self, 'journal.narrowspec')
1933 self.vfs.write("journal.branch",
1932 self.vfs.write("journal.branch",
1934 encoding.fromlocal(self.dirstate.branch()))
1933 encoding.fromlocal(self.dirstate.branch()))
1935 self.vfs.write("journal.desc",
1934 self.vfs.write("journal.desc",
1936 "%d\n%s\n" % (len(self), desc))
1935 "%d\n%s\n" % (len(self), desc))
1937 self.vfs.write("journal.bookmarks",
1936 self.vfs.write("journal.bookmarks",
1938 self.vfs.tryread("bookmarks"))
1937 self.vfs.tryread("bookmarks"))
1939 self.svfs.write("journal.phaseroots",
1938 self.svfs.write("journal.phaseroots",
1940 self.svfs.tryread("phaseroots"))
1939 self.svfs.tryread("phaseroots"))
1941
1940
1942 def recover(self):
1941 def recover(self):
1943 with self.lock():
1942 with self.lock():
1944 if self.svfs.exists("journal"):
1943 if self.svfs.exists("journal"):
1945 self.ui.status(_("rolling back interrupted transaction\n"))
1944 self.ui.status(_("rolling back interrupted transaction\n"))
1946 vfsmap = {'': self.svfs,
1945 vfsmap = {'': self.svfs,
1947 'plain': self.vfs,}
1946 'plain': self.vfs,}
1948 transaction.rollback(self.svfs, vfsmap, "journal",
1947 transaction.rollback(self.svfs, vfsmap, "journal",
1949 self.ui.warn,
1948 self.ui.warn,
1950 checkambigfiles=_cachedfiles)
1949 checkambigfiles=_cachedfiles)
1951 self.invalidate()
1950 self.invalidate()
1952 return True
1951 return True
1953 else:
1952 else:
1954 self.ui.warn(_("no interrupted transaction available\n"))
1953 self.ui.warn(_("no interrupted transaction available\n"))
1955 return False
1954 return False
1956
1955
1957 def rollback(self, dryrun=False, force=False):
1956 def rollback(self, dryrun=False, force=False):
1958 wlock = lock = dsguard = None
1957 wlock = lock = dsguard = None
1959 try:
1958 try:
1960 wlock = self.wlock()
1959 wlock = self.wlock()
1961 lock = self.lock()
1960 lock = self.lock()
1962 if self.svfs.exists("undo"):
1961 if self.svfs.exists("undo"):
1963 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1962 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1964
1963
1965 return self._rollback(dryrun, force, dsguard)
1964 return self._rollback(dryrun, force, dsguard)
1966 else:
1965 else:
1967 self.ui.warn(_("no rollback information available\n"))
1966 self.ui.warn(_("no rollback information available\n"))
1968 return 1
1967 return 1
1969 finally:
1968 finally:
1970 release(dsguard, lock, wlock)
1969 release(dsguard, lock, wlock)
1971
1970
1972 @unfilteredmethod # Until we get smarter cache management
1971 @unfilteredmethod # Until we get smarter cache management
1973 def _rollback(self, dryrun, force, dsguard):
1972 def _rollback(self, dryrun, force, dsguard):
1974 ui = self.ui
1973 ui = self.ui
1975 try:
1974 try:
1976 args = self.vfs.read('undo.desc').splitlines()
1975 args = self.vfs.read('undo.desc').splitlines()
1977 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1976 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1978 if len(args) >= 3:
1977 if len(args) >= 3:
1979 detail = args[2]
1978 detail = args[2]
1980 oldtip = oldlen - 1
1979 oldtip = oldlen - 1
1981
1980
1982 if detail and ui.verbose:
1981 if detail and ui.verbose:
1983 msg = (_('repository tip rolled back to revision %d'
1982 msg = (_('repository tip rolled back to revision %d'
1984 ' (undo %s: %s)\n')
1983 ' (undo %s: %s)\n')
1985 % (oldtip, desc, detail))
1984 % (oldtip, desc, detail))
1986 else:
1985 else:
1987 msg = (_('repository tip rolled back to revision %d'
1986 msg = (_('repository tip rolled back to revision %d'
1988 ' (undo %s)\n')
1987 ' (undo %s)\n')
1989 % (oldtip, desc))
1988 % (oldtip, desc))
1990 except IOError:
1989 except IOError:
1991 msg = _('rolling back unknown transaction\n')
1990 msg = _('rolling back unknown transaction\n')
1992 desc = None
1991 desc = None
1993
1992
1994 if not force and self['.'] != self['tip'] and desc == 'commit':
1993 if not force and self['.'] != self['tip'] and desc == 'commit':
1995 raise error.Abort(
1994 raise error.Abort(
1996 _('rollback of last commit while not checked out '
1995 _('rollback of last commit while not checked out '
1997 'may lose data'), hint=_('use -f to force'))
1996 'may lose data'), hint=_('use -f to force'))
1998
1997
1999 ui.status(msg)
1998 ui.status(msg)
2000 if dryrun:
1999 if dryrun:
2001 return 0
2000 return 0
2002
2001
2003 parents = self.dirstate.parents()
2002 parents = self.dirstate.parents()
2004 self.destroying()
2003 self.destroying()
2005 vfsmap = {'plain': self.vfs, '': self.svfs}
2004 vfsmap = {'plain': self.vfs, '': self.svfs}
2006 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2005 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
2007 checkambigfiles=_cachedfiles)
2006 checkambigfiles=_cachedfiles)
2008 if self.vfs.exists('undo.bookmarks'):
2007 if self.vfs.exists('undo.bookmarks'):
2009 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2008 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
2010 if self.svfs.exists('undo.phaseroots'):
2009 if self.svfs.exists('undo.phaseroots'):
2011 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2010 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2012 self.invalidate()
2011 self.invalidate()
2013
2012
2014 parentgone = any(p not in self.changelog.nodemap for p in parents)
2013 parentgone = any(p not in self.changelog.nodemap for p in parents)
2015 if parentgone:
2014 if parentgone:
2016 # prevent dirstateguard from overwriting already restored one
2015 # prevent dirstateguard from overwriting already restored one
2017 dsguard.close()
2016 dsguard.close()
2018
2017
2019 narrowspec.restorebackup(self, 'undo.narrowspec')
2018 narrowspec.restorebackup(self, 'undo.narrowspec')
2020 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2019 narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
2021 self.dirstate.restorebackup(None, 'undo.dirstate')
2020 self.dirstate.restorebackup(None, 'undo.dirstate')
2022 try:
2021 try:
2023 branch = self.vfs.read('undo.branch')
2022 branch = self.vfs.read('undo.branch')
2024 self.dirstate.setbranch(encoding.tolocal(branch))
2023 self.dirstate.setbranch(encoding.tolocal(branch))
2025 except IOError:
2024 except IOError:
2026 ui.warn(_('named branch could not be reset: '
2025 ui.warn(_('named branch could not be reset: '
2027 'current branch is still \'%s\'\n')
2026 'current branch is still \'%s\'\n')
2028 % self.dirstate.branch())
2027 % self.dirstate.branch())
2029
2028
2030 parents = tuple([p.rev() for p in self[None].parents()])
2029 parents = tuple([p.rev() for p in self[None].parents()])
2031 if len(parents) > 1:
2030 if len(parents) > 1:
2032 ui.status(_('working directory now based on '
2031 ui.status(_('working directory now based on '
2033 'revisions %d and %d\n') % parents)
2032 'revisions %d and %d\n') % parents)
2034 else:
2033 else:
2035 ui.status(_('working directory now based on '
2034 ui.status(_('working directory now based on '
2036 'revision %d\n') % parents)
2035 'revision %d\n') % parents)
2037 mergemod.mergestate.clean(self, self['.'].node())
2036 mergemod.mergestate.clean(self, self['.'].node())
2038
2037
2039 # TODO: if we know which new heads may result from this rollback, pass
2038 # TODO: if we know which new heads may result from this rollback, pass
2040 # them to destroy(), which will prevent the branchhead cache from being
2039 # them to destroy(), which will prevent the branchhead cache from being
2041 # invalidated.
2040 # invalidated.
2042 self.destroyed()
2041 self.destroyed()
2043 return 0
2042 return 0
2044
2043
2045 def _buildcacheupdater(self, newtransaction):
2044 def _buildcacheupdater(self, newtransaction):
2046 """called during transaction to build the callback updating cache
2045 """called during transaction to build the callback updating cache
2047
2046
2048 Lives on the repository to help extensions that might want to augment
2047 Lives on the repository to help extensions that might want to augment
2049 this logic. For this purpose, the created transaction is passed to the
2048 this logic. For this purpose, the created transaction is passed to the
2050 method.
2049 method.
2051 """
2050 """
2052 # we must avoid cyclic reference between repo and transaction.
2051 # we must avoid cyclic reference between repo and transaction.
2053 reporef = weakref.ref(self)
2052 reporef = weakref.ref(self)
2054 def updater(tr):
2053 def updater(tr):
2055 repo = reporef()
2054 repo = reporef()
2056 repo.updatecaches(tr)
2055 repo.updatecaches(tr)
2057 return updater
2056 return updater
2058
2057
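# Editor's sketch: the weakref pattern used by _buildcacheupdater above,
# reduced to a standalone example. 'FakeRepo' and 'make_updater' are
# hypothetical names; only the weakref.ref() usage mirrors the real code.

import weakref

class FakeRepo(object):
    def updatecaches(self, tr):
        print('warming caches for %r' % (tr,))

def make_updater(repo):
    reporef = weakref.ref(repo)  # break the repo <-> transaction cycle
    def updater(tr):
        repo = reporef()         # None if the repository was collected
        if repo is not None:
            repo.updatecaches(tr)
    return updater

# usage: repo = FakeRepo(); updater = make_updater(repo); updater('tr')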
2059 @unfilteredmethod
2058 @unfilteredmethod
2060 def updatecaches(self, tr=None, full=False):
2059 def updatecaches(self, tr=None, full=False):
2061 """warm appropriate caches
2060 """warm appropriate caches
2062
2061
2063 If this function is called after a transaction has closed, the transaction
2062 If this function is called after a transaction has closed, the transaction
2064 will be available in the 'tr' argument. This can be used to selectively
2063 will be available in the 'tr' argument. This can be used to selectively
2065 update caches relevant to the changes in that transaction.
2064 update caches relevant to the changes in that transaction.
2066
2065
2067 If 'full' is set, make sure all caches the function knows about have
2066 If 'full' is set, make sure all caches the function knows about have
2068 up-to-date data, even the ones usually loaded more lazily.
2067 up-to-date data, even the ones usually loaded more lazily.
2069 """
2068 """
2070 if tr is not None and tr.hookargs.get('source') == 'strip':
2069 if tr is not None and tr.hookargs.get('source') == 'strip':
2071 # During strip, many caches are invalid but
2070 # During strip, many caches are invalid but
2072 # later call to `destroyed` will refresh them.
2071 # later call to `destroyed` will refresh them.
2073 return
2072 return
2074
2073
2075 if tr is None or tr.changes['origrepolen'] < len(self):
2074 if tr is None or tr.changes['origrepolen'] < len(self):
2076 # updating the unfiltered branchmap should refresh all the others,
2075 # accessing the 'served' branchmap should refresh all the others,
2077 self.ui.debug('updating the branch cache\n')
2076 self.ui.debug('updating the branch cache\n')
2078 branchmap.updatecache(self.filtered('served'))
2077 self.filtered('served').branchmap()
2079
2078
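# Editor's note (not part of the original source): after this change,
# warming the branch caches is a side effect of simply asking a filtered
# repo view for its branchmap; the lookup loads, updates and persists the
# cache, so callers no longer reach for a module-level update helper:
#
#     repo.filtered('served').branchmap()   # loads and refreshes the cache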
2080 if full:
2079 if full:
2081 rbc = self.revbranchcache()
2080 rbc = self.revbranchcache()
2082 for r in self.changelog:
2081 for r in self.changelog:
2083 rbc.branchinfo(r)
2082 rbc.branchinfo(r)
2084 rbc.write()
2083 rbc.write()
2085
2084
2086 # ensure the working copy parents are in the manifestfulltextcache
2085 # ensure the working copy parents are in the manifestfulltextcache
2087 for ctx in self['.'].parents():
2086 for ctx in self['.'].parents():
2088 ctx.manifest() # accessing the manifest is enough
2087 ctx.manifest() # accessing the manifest is enough
2089
2088
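# Editor's sketch of how updatecaches() above is typically driven (the
# calls are illustrative; 'repo' is assumed to be a localrepository):
#
#     repo.updatecaches()           # tr is None: warm the branch cache
#     repo.updatecaches(full=True)  # also fill the rev-branch cache and
#                                   # the working-copy parents' manifests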
2090 def invalidatecaches(self):
2089 def invalidatecaches(self):
2091
2090
2092 if r'_tagscache' in vars(self):
2091 if r'_tagscache' in vars(self):
2093 # can't use delattr on proxy
2092 # can't use delattr on proxy
2094 del self.__dict__[r'_tagscache']
2093 del self.__dict__[r'_tagscache']
2095
2094
2096 self.unfiltered()._branchcaches.clear()
2095 self._branchcaches.clear()
2097 self.invalidatevolatilesets()
2096 self.invalidatevolatilesets()
2098 self._sparsesignaturecache.clear()
2097 self._sparsesignaturecache.clear()
2099
2098
2100 def invalidatevolatilesets(self):
2099 def invalidatevolatilesets(self):
2101 self.filteredrevcache.clear()
2100 self.filteredrevcache.clear()
2102 obsolete.clearobscaches(self)
2101 obsolete.clearobscaches(self)
2103
2102
2104 def invalidatedirstate(self):
2103 def invalidatedirstate(self):
2105 '''Invalidates the dirstate, causing the next call to dirstate
2104 '''Invalidates the dirstate, causing the next call to dirstate
2106 to check if it was modified since the last time it was read,
2105 to check if it was modified since the last time it was read,
2107 rereading it if it has.
2106 rereading it if it has.
2108
2107
2109 This is different from dirstate.invalidate() in that it doesn't always
2108 This is different from dirstate.invalidate() in that it doesn't always
2110 reread the dirstate. Use dirstate.invalidate() if you want to
2109 reread the dirstate. Use dirstate.invalidate() if you want to
2111 explicitly read the dirstate again (i.e. restoring it to a previous
2110 explicitly read the dirstate again (i.e. restoring it to a previous
2112 known good state).'''
2111 known good state).'''
2113 if hasunfilteredcache(self, r'dirstate'):
2112 if hasunfilteredcache(self, r'dirstate'):
2114 for k in self.dirstate._filecache:
2113 for k in self.dirstate._filecache:
2115 try:
2114 try:
2116 delattr(self.dirstate, k)
2115 delattr(self.dirstate, k)
2117 except AttributeError:
2116 except AttributeError:
2118 pass
2117 pass
2119 delattr(self.unfiltered(), r'dirstate')
2118 delattr(self.unfiltered(), r'dirstate')
2120
2119
2121 def invalidate(self, clearfilecache=False):
2120 def invalidate(self, clearfilecache=False):
2122 '''Invalidates both store and non-store parts other than dirstate
2121 '''Invalidates both store and non-store parts other than dirstate
2123
2122
2124 If a transaction is running, invalidation of store is omitted,
2123 If a transaction is running, invalidation of store is omitted,
2125 because discarding in-memory changes might cause inconsistency
2124 because discarding in-memory changes might cause inconsistency
2126 (e.g. an incomplete fncache causes unintentional failure, but
2125 (e.g. an incomplete fncache causes unintentional failure, but
2127 a redundant one doesn't).
2126 a redundant one doesn't).
2128 '''
2127 '''
2129 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2128 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2130 for k in list(self._filecache.keys()):
2129 for k in list(self._filecache.keys()):
2131 # dirstate is invalidated separately in invalidatedirstate()
2130 # dirstate is invalidated separately in invalidatedirstate()
2132 if k == 'dirstate':
2131 if k == 'dirstate':
2133 continue
2132 continue
2134 if (k == 'changelog' and
2133 if (k == 'changelog' and
2135 self.currenttransaction() and
2134 self.currenttransaction() and
2136 self.changelog._delayed):
2135 self.changelog._delayed):
2137 # The changelog object may store unwritten revisions. We don't
2136 # The changelog object may store unwritten revisions. We don't
2138 # want to lose them.
2137 # want to lose them.
2139 # TODO: Solve the problem instead of working around it.
2138 # TODO: Solve the problem instead of working around it.
2140 continue
2139 continue
2141
2140
2142 if clearfilecache:
2141 if clearfilecache:
2143 del self._filecache[k]
2142 del self._filecache[k]
2144 try:
2143 try:
2145 delattr(unfiltered, k)
2144 delattr(unfiltered, k)
2146 except AttributeError:
2145 except AttributeError:
2147 pass
2146 pass
2148 self.invalidatecaches()
2147 self.invalidatecaches()
2149 if not self.currenttransaction():
2148 if not self.currenttransaction():
2150 # TODO: Changing contents of store outside transaction
2149 # TODO: Changing contents of store outside transaction
2151 # causes inconsistency. We should make in-memory store
2150 # causes inconsistency. We should make in-memory store
2152 # changes detectable, and abort if changed.
2151 # changes detectable, and abort if changed.
2153 self.store.invalidatecaches()
2152 self.store.invalidatecaches()
2154
2153
2155 def invalidateall(self):
2154 def invalidateall(self):
2156 '''Fully invalidates both store and non-store parts, causing the
2155 '''Fully invalidates both store and non-store parts, causing the
2157 subsequent operation to reread any outside changes.'''
2156 subsequent operation to reread any outside changes.'''
2158 # extensions should hook this to invalidate their caches
2157 # extensions should hook this to invalidate their caches
2159 self.invalidate()
2158 self.invalidate()
2160 self.invalidatedirstate()
2159 self.invalidatedirstate()
2161
2160
2162 @unfilteredmethod
2161 @unfilteredmethod
2163 def _refreshfilecachestats(self, tr):
2162 def _refreshfilecachestats(self, tr):
2164 """Reload stats of cached files so that they are flagged as valid"""
2163 """Reload stats of cached files so that they are flagged as valid"""
2165 for k, ce in self._filecache.items():
2164 for k, ce in self._filecache.items():
2166 k = pycompat.sysstr(k)
2165 k = pycompat.sysstr(k)
2167 if k == r'dirstate' or k not in self.__dict__:
2166 if k == r'dirstate' or k not in self.__dict__:
2168 continue
2167 continue
2169 ce.refresh()
2168 ce.refresh()
2170
2169
2171 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2170 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2172 inheritchecker=None, parentenvvar=None):
2171 inheritchecker=None, parentenvvar=None):
2173 parentlock = None
2172 parentlock = None
2174 # the contents of parentenvvar are used by the underlying lock to
2173 # the contents of parentenvvar are used by the underlying lock to
2175 # determine whether it can be inherited
2174 # determine whether it can be inherited
2176 if parentenvvar is not None:
2175 if parentenvvar is not None:
2177 parentlock = encoding.environ.get(parentenvvar)
2176 parentlock = encoding.environ.get(parentenvvar)
2178
2177
2179 timeout = 0
2178 timeout = 0
2180 warntimeout = 0
2179 warntimeout = 0
2181 if wait:
2180 if wait:
2182 timeout = self.ui.configint("ui", "timeout")
2181 timeout = self.ui.configint("ui", "timeout")
2183 warntimeout = self.ui.configint("ui", "timeout.warn")
2182 warntimeout = self.ui.configint("ui", "timeout.warn")
2184 # internal config: ui.signal-safe-lock
2183 # internal config: ui.signal-safe-lock
2185 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2184 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2186
2185
2187 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2186 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2188 releasefn=releasefn,
2187 releasefn=releasefn,
2189 acquirefn=acquirefn, desc=desc,
2188 acquirefn=acquirefn, desc=desc,
2190 inheritchecker=inheritchecker,
2189 inheritchecker=inheritchecker,
2191 parentlock=parentlock,
2190 parentlock=parentlock,
2192 signalsafe=signalsafe)
2191 signalsafe=signalsafe)
2193 return l
2192 return l
2194
2193
2195 def _afterlock(self, callback):
2194 def _afterlock(self, callback):
2196 """add a callback to be run when the repository is fully unlocked
2195 """add a callback to be run when the repository is fully unlocked
2197
2196
2198 The callback will be executed when the outermost lock is released
2197 The callback will be executed when the outermost lock is released
2199 (with wlock being higher level than 'lock')."""
2198 (with wlock being higher level than 'lock')."""
2200 for ref in (self._wlockref, self._lockref):
2199 for ref in (self._wlockref, self._lockref):
2201 l = ref and ref()
2200 l = ref and ref()
2202 if l and l.held:
2201 if l and l.held:
2203 l.postrelease.append(callback)
2202 l.postrelease.append(callback)
2204 break
2203 break
2205 else: # no lock has been found.
2204 else: # no lock has been found.
2206 callback()
2205 callback()
2207
2206
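# Editor's sketch of the _afterlock() contract documented above: the
# callback fires once the outermost lock is released, or immediately when
# no lock is held. '_notifywhenunlocked' is a hypothetical caller.

def _notifywhenunlocked(repo):
    def callback():
        repo.ui.status('repository fully unlocked\n')
    repo._afterlock(callback)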
2208 def lock(self, wait=True):
2207 def lock(self, wait=True):
2209 '''Lock the repository store (.hg/store) and return a weak reference
2208 '''Lock the repository store (.hg/store) and return a weak reference
2210 to the lock. Use this before modifying the store (e.g. committing or
2209 to the lock. Use this before modifying the store (e.g. committing or
2211 stripping). If you are opening a transaction, get a lock as well.
2210 stripping). If you are opening a transaction, get a lock as well.
2212
2211
2213 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2212 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2214 'wlock' first to avoid a deadlock hazard.'''
2213 'wlock' first to avoid a deadlock hazard.'''
2215 l = self._currentlock(self._lockref)
2214 l = self._currentlock(self._lockref)
2216 if l is not None:
2215 if l is not None:
2217 l.lock()
2216 l.lock()
2218 return l
2217 return l
2219
2218
2220 l = self._lock(self.svfs, "lock", wait, None,
2219 l = self._lock(self.svfs, "lock", wait, None,
2221 self.invalidate, _('repository %s') % self.origroot)
2220 self.invalidate, _('repository %s') % self.origroot)
2222 self._lockref = weakref.ref(l)
2221 self._lockref = weakref.ref(l)
2223 return l
2222 return l
2224
2223
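# Editor's sketch of the ordering rule stated in the docstring above:
# when both locks are needed, always take wlock before lock. A hedged
# example, assuming 'repo' is a localrepository:

def lockedoperation(repo):
    with repo.wlock(), repo.lock():           # wlock first: no deadlock
        with repo.transaction('example'):
            pass  # modify store and working-directory state here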
2225 def _wlockchecktransaction(self):
2224 def _wlockchecktransaction(self):
2226 if self.currenttransaction() is not None:
2225 if self.currenttransaction() is not None:
2227 raise error.LockInheritanceContractViolation(
2226 raise error.LockInheritanceContractViolation(
2228 'wlock cannot be inherited in the middle of a transaction')
2227 'wlock cannot be inherited in the middle of a transaction')
2229
2228
2230 def wlock(self, wait=True):
2229 def wlock(self, wait=True):
2231 '''Lock the non-store parts of the repository (everything under
2230 '''Lock the non-store parts of the repository (everything under
2232 .hg except .hg/store) and return a weak reference to the lock.
2231 .hg except .hg/store) and return a weak reference to the lock.
2233
2232
2234 Use this before modifying files in .hg.
2233 Use this before modifying files in .hg.
2235
2234
2236 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2235 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2237 'wlock' first to avoid a deadlock hazard.'''
2236 'wlock' first to avoid a deadlock hazard.'''
2238 l = self._wlockref and self._wlockref()
2237 l = self._wlockref and self._wlockref()
2239 if l is not None and l.held:
2238 if l is not None and l.held:
2240 l.lock()
2239 l.lock()
2241 return l
2240 return l
2242
2241
2243 # We do not need to check for non-waiting lock acquisition. Such
2242 # We do not need to check for non-waiting lock acquisition. Such
2244 # acquisition would not cause a deadlock, as it would just fail.
2243 # acquisition would not cause a deadlock, as it would just fail.
2245 if wait and (self.ui.configbool('devel', 'all-warnings')
2244 if wait and (self.ui.configbool('devel', 'all-warnings')
2246 or self.ui.configbool('devel', 'check-locks')):
2245 or self.ui.configbool('devel', 'check-locks')):
2247 if self._currentlock(self._lockref) is not None:
2246 if self._currentlock(self._lockref) is not None:
2248 self.ui.develwarn('"wlock" acquired after "lock"')
2247 self.ui.develwarn('"wlock" acquired after "lock"')
2249
2248
2250 def unlock():
2249 def unlock():
2251 if self.dirstate.pendingparentchange():
2250 if self.dirstate.pendingparentchange():
2252 self.dirstate.invalidate()
2251 self.dirstate.invalidate()
2253 else:
2252 else:
2254 self.dirstate.write(None)
2253 self.dirstate.write(None)
2255
2254
2256 self._filecache['dirstate'].refresh()
2255 self._filecache['dirstate'].refresh()
2257
2256
2258 l = self._lock(self.vfs, "wlock", wait, unlock,
2257 l = self._lock(self.vfs, "wlock", wait, unlock,
2259 self.invalidatedirstate, _('working directory of %s') %
2258 self.invalidatedirstate, _('working directory of %s') %
2260 self.origroot,
2259 self.origroot,
2261 inheritchecker=self._wlockchecktransaction,
2260 inheritchecker=self._wlockchecktransaction,
2262 parentenvvar='HG_WLOCK_LOCKER')
2261 parentenvvar='HG_WLOCK_LOCKER')
2263 self._wlockref = weakref.ref(l)
2262 self._wlockref = weakref.ref(l)
2264 return l
2263 return l
2265
2264
2266 def _currentlock(self, lockref):
2265 def _currentlock(self, lockref):
2267 """Returns the lock if it's held, or None if it's not."""
2266 """Returns the lock if it's held, or None if it's not."""
2268 if lockref is None:
2267 if lockref is None:
2269 return None
2268 return None
2270 l = lockref()
2269 l = lockref()
2271 if l is None or not l.held:
2270 if l is None or not l.held:
2272 return None
2271 return None
2273 return l
2272 return l
2274
2273
2275 def currentwlock(self):
2274 def currentwlock(self):
2276 """Returns the wlock if it's held, or None if it's not."""
2275 """Returns the wlock if it's held, or None if it's not."""
2277 return self._currentlock(self._wlockref)
2276 return self._currentlock(self._wlockref)
2278
2277
2279 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2278 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2280 """
2279 """
2281 commit an individual file as part of a larger transaction
2280 commit an individual file as part of a larger transaction
2282 """
2281 """
2283
2282
2284 fname = fctx.path()
2283 fname = fctx.path()
2285 fparent1 = manifest1.get(fname, nullid)
2284 fparent1 = manifest1.get(fname, nullid)
2286 fparent2 = manifest2.get(fname, nullid)
2285 fparent2 = manifest2.get(fname, nullid)
2287 if isinstance(fctx, context.filectx):
2286 if isinstance(fctx, context.filectx):
2288 node = fctx.filenode()
2287 node = fctx.filenode()
2289 if node in [fparent1, fparent2]:
2288 if node in [fparent1, fparent2]:
2290 self.ui.debug('reusing %s filelog entry\n' % fname)
2289 self.ui.debug('reusing %s filelog entry\n' % fname)
2291 if manifest1.flags(fname) != fctx.flags():
2290 if manifest1.flags(fname) != fctx.flags():
2292 changelist.append(fname)
2291 changelist.append(fname)
2293 return node
2292 return node
2294
2293
2295 flog = self.file(fname)
2294 flog = self.file(fname)
2296 meta = {}
2295 meta = {}
2297 copy = fctx.renamed()
2296 copy = fctx.renamed()
2298 if copy and copy[0] != fname:
2297 if copy and copy[0] != fname:
2299 # Mark the new revision of this file as a copy of another
2298 # Mark the new revision of this file as a copy of another
2300 # file. This copy data will effectively act as a parent
2299 # file. This copy data will effectively act as a parent
2301 # of this new revision. If this is a merge, the first
2300 # of this new revision. If this is a merge, the first
2302 # parent will be the nullid (meaning "look up the copy data")
2301 # parent will be the nullid (meaning "look up the copy data")
2303 # and the second one will be the other parent. For example:
2302 # and the second one will be the other parent. For example:
2304 #
2303 #
2305 # 0 --- 1 --- 3 rev1 changes file foo
2304 # 0 --- 1 --- 3 rev1 changes file foo
2306 # \ / rev2 renames foo to bar and changes it
2305 # \ / rev2 renames foo to bar and changes it
2307 # \- 2 -/ rev3 should have bar with all changes and
2306 # \- 2 -/ rev3 should have bar with all changes and
2308 # should record that bar descends from
2307 # should record that bar descends from
2309 # bar in rev2 and foo in rev1
2308 # bar in rev2 and foo in rev1
2310 #
2309 #
2311 # this allows this merge to succeed:
2310 # this allows this merge to succeed:
2312 #
2311 #
2313 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2312 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2314 # \ / merging rev3 and rev4 should use bar@rev2
2313 # \ / merging rev3 and rev4 should use bar@rev2
2315 # \- 2 --- 4 as the merge base
2314 # \- 2 --- 4 as the merge base
2316 #
2315 #
2317
2316
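# Editor's illustration (hypothetical values) of the metadata recorded
# for the rename sketched above, where rev2 renames foo to bar:
#
#     meta = {'copy': 'foo', 'copyrev': '<40-hex node of foo in rev1>'}
#     fparent1, fparent2 = nullid, newfparent
#
# the nullid first parent tells readers to consult the copy data.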
2318 cfname = copy[0]
2317 cfname = copy[0]
2319 crev = manifest1.get(cfname)
2318 crev = manifest1.get(cfname)
2320 newfparent = fparent2
2319 newfparent = fparent2
2321
2320
2322 if manifest2: # branch merge
2321 if manifest2: # branch merge
2323 if fparent2 == nullid or crev is None: # copied on remote side
2322 if fparent2 == nullid or crev is None: # copied on remote side
2324 if cfname in manifest2:
2323 if cfname in manifest2:
2325 crev = manifest2[cfname]
2324 crev = manifest2[cfname]
2326 newfparent = fparent1
2325 newfparent = fparent1
2327
2326
2328 # Here, we used to search backwards through history to try to find
2327 # Here, we used to search backwards through history to try to find
2329 # where the file copy came from if the source of a copy was not in
2328 # where the file copy came from if the source of a copy was not in
2330 # the parent directory. However, this doesn't actually make sense to
2329 # the parent directory. However, this doesn't actually make sense to
2331 # do (what does a copy from something not in your working copy even
2330 # do (what does a copy from something not in your working copy even
2332 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2331 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2333 # the user that copy information was dropped, so if they didn't
2332 # the user that copy information was dropped, so if they didn't
2334 # expect this outcome it can be fixed, but this is the correct
2333 # expect this outcome it can be fixed, but this is the correct
2335 # behavior in this circumstance.
2334 # behavior in this circumstance.
2336
2335
2337 if crev:
2336 if crev:
2338 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2337 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2339 meta["copy"] = cfname
2338 meta["copy"] = cfname
2340 meta["copyrev"] = hex(crev)
2339 meta["copyrev"] = hex(crev)
2341 fparent1, fparent2 = nullid, newfparent
2340 fparent1, fparent2 = nullid, newfparent
2342 else:
2341 else:
2343 self.ui.warn(_("warning: can't find ancestor for '%s' "
2342 self.ui.warn(_("warning: can't find ancestor for '%s' "
2344 "copied from '%s'!\n") % (fname, cfname))
2343 "copied from '%s'!\n") % (fname, cfname))
2345
2344
2346 elif fparent1 == nullid:
2345 elif fparent1 == nullid:
2347 fparent1, fparent2 = fparent2, nullid
2346 fparent1, fparent2 = fparent2, nullid
2348 elif fparent2 != nullid:
2347 elif fparent2 != nullid:
2349 # is one parent an ancestor of the other?
2348 # is one parent an ancestor of the other?
2350 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2349 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2351 if fparent1 in fparentancestors:
2350 if fparent1 in fparentancestors:
2352 fparent1, fparent2 = fparent2, nullid
2351 fparent1, fparent2 = fparent2, nullid
2353 elif fparent2 in fparentancestors:
2352 elif fparent2 in fparentancestors:
2354 fparent2 = nullid
2353 fparent2 = nullid
2355
2354
2356 # is the file changed?
2355 # is the file changed?
2357 text = fctx.data()
2356 text = fctx.data()
2358 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2357 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2359 changelist.append(fname)
2358 changelist.append(fname)
2360 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2359 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2361 # are just the flags changed during merge?
2360 # are just the flags changed during merge?
2362 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2361 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2363 changelist.append(fname)
2362 changelist.append(fname)
2364
2363
2365 return fparent1
2364 return fparent1
2366
2365
2367 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2366 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2368 """check for commit arguments that aren't committable"""
2367 """check for commit arguments that aren't committable"""
2369 if match.isexact() or match.prefix():
2368 if match.isexact() or match.prefix():
2370 matched = set(status.modified + status.added + status.removed)
2369 matched = set(status.modified + status.added + status.removed)
2371
2370
2372 for f in match.files():
2371 for f in match.files():
2373 f = self.dirstate.normalize(f)
2372 f = self.dirstate.normalize(f)
2374 if f == '.' or f in matched or f in wctx.substate:
2373 if f == '.' or f in matched or f in wctx.substate:
2375 continue
2374 continue
2376 if f in status.deleted:
2375 if f in status.deleted:
2377 fail(f, _('file not found!'))
2376 fail(f, _('file not found!'))
2378 if f in vdirs: # visited directory
2377 if f in vdirs: # visited directory
2379 d = f + '/'
2378 d = f + '/'
2380 for mf in matched:
2379 for mf in matched:
2381 if mf.startswith(d):
2380 if mf.startswith(d):
2382 break
2381 break
2383 else:
2382 else:
2384 fail(f, _("no match under directory!"))
2383 fail(f, _("no match under directory!"))
2385 elif f not in self.dirstate:
2384 elif f not in self.dirstate:
2386 fail(f, _("file not tracked!"))
2385 fail(f, _("file not tracked!"))
2387
2386
2388 @unfilteredmethod
2387 @unfilteredmethod
2389 def commit(self, text="", user=None, date=None, match=None, force=False,
2388 def commit(self, text="", user=None, date=None, match=None, force=False,
2390 editor=False, extra=None):
2389 editor=False, extra=None):
2391 """Add a new revision to current repository.
2390 """Add a new revision to current repository.
2392
2391
2393 Revision information is gathered from the working directory,
2392 Revision information is gathered from the working directory,
2394 match can be used to filter the committed files. If editor is
2393 match can be used to filter the committed files. If editor is
2395 supplied, it is called to get a commit message.
2394 supplied, it is called to get a commit message.
2396 """
2395 """
2397 if extra is None:
2396 if extra is None:
2398 extra = {}
2397 extra = {}
2399
2398
2400 def fail(f, msg):
2399 def fail(f, msg):
2401 raise error.Abort('%s: %s' % (f, msg))
2400 raise error.Abort('%s: %s' % (f, msg))
2402
2401
2403 if not match:
2402 if not match:
2404 match = matchmod.always(self.root, '')
2403 match = matchmod.always(self.root, '')
2405
2404
2406 if not force:
2405 if not force:
2407 vdirs = []
2406 vdirs = []
2408 match.explicitdir = vdirs.append
2407 match.explicitdir = vdirs.append
2409 match.bad = fail
2408 match.bad = fail
2410
2409
2411 # lock() for recent changelog (see issue4368)
2410 # lock() for recent changelog (see issue4368)
2412 with self.wlock(), self.lock():
2411 with self.wlock(), self.lock():
2413 wctx = self[None]
2412 wctx = self[None]
2414 merge = len(wctx.parents()) > 1
2413 merge = len(wctx.parents()) > 1
2415
2414
2416 if not force and merge and not match.always():
2415 if not force and merge and not match.always():
2417 raise error.Abort(_('cannot partially commit a merge '
2416 raise error.Abort(_('cannot partially commit a merge '
2418 '(do not specify files or patterns)'))
2417 '(do not specify files or patterns)'))
2419
2418
2420 status = self.status(match=match, clean=force)
2419 status = self.status(match=match, clean=force)
2421 if force:
2420 if force:
2422 status.modified.extend(status.clean) # mq may commit clean files
2421 status.modified.extend(status.clean) # mq may commit clean files
2423
2422
2424 # check subrepos
2423 # check subrepos
2425 subs, commitsubs, newstate = subrepoutil.precommit(
2424 subs, commitsubs, newstate = subrepoutil.precommit(
2426 self.ui, wctx, status, match, force=force)
2425 self.ui, wctx, status, match, force=force)
2427
2426
2428 # make sure all explicit patterns are matched
2427 # make sure all explicit patterns are matched
2429 if not force:
2428 if not force:
2430 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2429 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2431
2430
2432 cctx = context.workingcommitctx(self, status,
2431 cctx = context.workingcommitctx(self, status,
2433 text, user, date, extra)
2432 text, user, date, extra)
2434
2433
2435 # internal config: ui.allowemptycommit
2434 # internal config: ui.allowemptycommit
2436 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2435 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2437 or extra.get('close') or merge or cctx.files()
2436 or extra.get('close') or merge or cctx.files()
2438 or self.ui.configbool('ui', 'allowemptycommit'))
2437 or self.ui.configbool('ui', 'allowemptycommit'))
2439 if not allowemptycommit:
2438 if not allowemptycommit:
2440 return None
2439 return None
2441
2440
2442 if merge and cctx.deleted():
2441 if merge and cctx.deleted():
2443 raise error.Abort(_("cannot commit merge with missing files"))
2442 raise error.Abort(_("cannot commit merge with missing files"))
2444
2443
2445 ms = mergemod.mergestate.read(self)
2444 ms = mergemod.mergestate.read(self)
2446 mergeutil.checkunresolved(ms)
2445 mergeutil.checkunresolved(ms)
2447
2446
2448 if editor:
2447 if editor:
2449 cctx._text = editor(self, cctx, subs)
2448 cctx._text = editor(self, cctx, subs)
2450 edited = (text != cctx._text)
2449 edited = (text != cctx._text)
2451
2450
2452 # Save commit message in case this transaction gets rolled back
2451 # Save commit message in case this transaction gets rolled back
2453 # (e.g. by a pretxncommit hook). Leave the content alone on
2452 # (e.g. by a pretxncommit hook). Leave the content alone on
2454 # the assumption that the user will use the same editor again.
2453 # the assumption that the user will use the same editor again.
2455 msgfn = self.savecommitmessage(cctx._text)
2454 msgfn = self.savecommitmessage(cctx._text)
2456
2455
2457 # commit subs and write new state
2456 # commit subs and write new state
2458 if subs:
2457 if subs:
2459 for s in sorted(commitsubs):
2458 for s in sorted(commitsubs):
2460 sub = wctx.sub(s)
2459 sub = wctx.sub(s)
2461 self.ui.status(_('committing subrepository %s\n') %
2460 self.ui.status(_('committing subrepository %s\n') %
2462 subrepoutil.subrelpath(sub))
2461 subrepoutil.subrelpath(sub))
2463 sr = sub.commit(cctx._text, user, date)
2462 sr = sub.commit(cctx._text, user, date)
2464 newstate[s] = (newstate[s][0], sr)
2463 newstate[s] = (newstate[s][0], sr)
2465 subrepoutil.writestate(self, newstate)
2464 subrepoutil.writestate(self, newstate)
2466
2465
2467 p1, p2 = self.dirstate.parents()
2466 p1, p2 = self.dirstate.parents()
2468 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2467 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2469 try:
2468 try:
2470 self.hook("precommit", throw=True, parent1=hookp1,
2469 self.hook("precommit", throw=True, parent1=hookp1,
2471 parent2=hookp2)
2470 parent2=hookp2)
2472 with self.transaction('commit'):
2471 with self.transaction('commit'):
2473 ret = self.commitctx(cctx, True)
2472 ret = self.commitctx(cctx, True)
2474 # update bookmarks, dirstate and mergestate
2473 # update bookmarks, dirstate and mergestate
2475 bookmarks.update(self, [p1, p2], ret)
2474 bookmarks.update(self, [p1, p2], ret)
2476 cctx.markcommitted(ret)
2475 cctx.markcommitted(ret)
2477 ms.reset()
2476 ms.reset()
2478 except: # re-raises
2477 except: # re-raises
2479 if edited:
2478 if edited:
2480 self.ui.write(
2479 self.ui.write(
2481 _('note: commit message saved in %s\n') % msgfn)
2480 _('note: commit message saved in %s\n') % msgfn)
2482 raise
2481 raise
2483
2482
2484 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2483 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2485 # hack for commands that use a temporary commit (e.g. histedit)
2484 # hack for commands that use a temporary commit (e.g. histedit)
2486 # temporary commit got stripped before hook release
2485 # temporary commit got stripped before hook release
2487 if self.changelog.hasnode(ret):
2486 if self.changelog.hasnode(ret):
2488 self.hook("commit", node=node, parent1=parent1,
2487 self.hook("commit", node=node, parent1=parent1,
2489 parent2=parent2)
2488 parent2=parent2)
2490 self._afterlock(commithook)
2489 self._afterlock(commithook)
2491 return ret
2490 return ret
2492
2491
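# Editor's sketch: driving the commit() entry point above from a script
# or extension. 'commitall' and its arguments are hypothetical; the
# return contract (a node, or None when nothing changed) comes from the
# code above.

def commitall(repo, message, username):
    node = repo.commit(text=message, user=username)
    if node is None:
        repo.ui.status('nothing changed\n')
    return node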
2493 @unfilteredmethod
2492 @unfilteredmethod
2494 def commitctx(self, ctx, error=False):
2493 def commitctx(self, ctx, error=False):
2495 """Add a new revision to current repository.
2494 """Add a new revision to current repository.
2496 Revision information is passed via the context argument.
2495 Revision information is passed via the context argument.
2497
2496
2498 ctx.files() should list all files involved in this commit, i.e.
2497 ctx.files() should list all files involved in this commit, i.e.
2499 modified/added/removed files. On merge, it may be wider than the
2498 modified/added/removed files. On merge, it may be wider than the
2500 ctx.files() to be committed, since any file nodes derived directly
2499 ctx.files() to be committed, since any file nodes derived directly
2501 from p1 or p2 are excluded from the committed ctx.files().
2500 from p1 or p2 are excluded from the committed ctx.files().
2502 """
2501 """
2503
2502
2504 p1, p2 = ctx.p1(), ctx.p2()
2503 p1, p2 = ctx.p1(), ctx.p2()
2505 user = ctx.user()
2504 user = ctx.user()
2506
2505
2507 with self.lock(), self.transaction("commit") as tr:
2506 with self.lock(), self.transaction("commit") as tr:
2508 trp = weakref.proxy(tr)
2507 trp = weakref.proxy(tr)
2509
2508
2510 if ctx.manifestnode():
2509 if ctx.manifestnode():
2511 # reuse an existing manifest revision
2510 # reuse an existing manifest revision
2512 self.ui.debug('reusing known manifest\n')
2511 self.ui.debug('reusing known manifest\n')
2513 mn = ctx.manifestnode()
2512 mn = ctx.manifestnode()
2514 files = ctx.files()
2513 files = ctx.files()
2515 elif ctx.files():
2514 elif ctx.files():
2516 m1ctx = p1.manifestctx()
2515 m1ctx = p1.manifestctx()
2517 m2ctx = p2.manifestctx()
2516 m2ctx = p2.manifestctx()
2518 mctx = m1ctx.copy()
2517 mctx = m1ctx.copy()
2519
2518
2520 m = mctx.read()
2519 m = mctx.read()
2521 m1 = m1ctx.read()
2520 m1 = m1ctx.read()
2522 m2 = m2ctx.read()
2521 m2 = m2ctx.read()
2523
2522
2524 # check in files
2523 # check in files
2525 added = []
2524 added = []
2526 changed = []
2525 changed = []
2527 removed = list(ctx.removed())
2526 removed = list(ctx.removed())
2528 linkrev = len(self)
2527 linkrev = len(self)
2529 self.ui.note(_("committing files:\n"))
2528 self.ui.note(_("committing files:\n"))
2530 for f in sorted(ctx.modified() + ctx.added()):
2529 for f in sorted(ctx.modified() + ctx.added()):
2531 self.ui.note(f + "\n")
2530 self.ui.note(f + "\n")
2532 try:
2531 try:
2533 fctx = ctx[f]
2532 fctx = ctx[f]
2534 if fctx is None:
2533 if fctx is None:
2535 removed.append(f)
2534 removed.append(f)
2536 else:
2535 else:
2537 added.append(f)
2536 added.append(f)
2538 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2537 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2539 trp, changed)
2538 trp, changed)
2540 m.setflag(f, fctx.flags())
2539 m.setflag(f, fctx.flags())
2541 except OSError:
2540 except OSError:
2542 self.ui.warn(_("trouble committing %s!\n") % f)
2541 self.ui.warn(_("trouble committing %s!\n") % f)
2543 raise
2542 raise
2544 except IOError as inst:
2543 except IOError as inst:
2545 errcode = getattr(inst, 'errno', errno.ENOENT)
2544 errcode = getattr(inst, 'errno', errno.ENOENT)
2546 if error or errcode and errcode != errno.ENOENT:
2545 if error or errcode and errcode != errno.ENOENT:
2547 self.ui.warn(_("trouble committing %s!\n") % f)
2546 self.ui.warn(_("trouble committing %s!\n") % f)
2548 raise
2547 raise
2549
2548
2550 # update manifest
2549 # update manifest
2551 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2550 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2552 drop = [f for f in removed if f in m]
2551 drop = [f for f in removed if f in m]
2553 for f in drop:
2552 for f in drop:
2554 del m[f]
2553 del m[f]
2555 files = changed + removed
2554 files = changed + removed
2556 md = None
2555 md = None
2557 if not files:
2556 if not files:
2558 # if no "files" actually changed in terms of the changelog,
2557 # if no "files" actually changed in terms of the changelog,
2559 # try hard to detect an unmodified manifest entry so that the
2558 # try hard to detect an unmodified manifest entry so that the
2560 # exact same commit can be reproduced later on convert.
2559 # exact same commit can be reproduced later on convert.
2561 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2560 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2562 if not files and md:
2561 if not files and md:
2563 self.ui.debug('not reusing manifest (no file change in '
2562 self.ui.debug('not reusing manifest (no file change in '
2564 'changelog, but manifest differs)\n')
2563 'changelog, but manifest differs)\n')
2565 if files or md:
2564 if files or md:
2566 self.ui.note(_("committing manifest\n"))
2565 self.ui.note(_("committing manifest\n"))
2567 # we're using narrowmatch here since it's already applied at
2566 # we're using narrowmatch here since it's already applied at
2568 # other stages (such as dirstate.walk), so we're already
2567 # other stages (such as dirstate.walk), so we're already
2569 # ignoring things outside of narrowspec in most cases. The
2568 # ignoring things outside of narrowspec in most cases. The
2570 # one case where we might have files outside the narrowspec
2569 # one case where we might have files outside the narrowspec
2571 # at this point is merges, and we already error out in the
2570 # at this point is merges, and we already error out in the
2572 # case where the merge has files outside of the narrowspec,
2571 # case where the merge has files outside of the narrowspec,
2573 # so this is safe.
2572 # so this is safe.
2574 mn = mctx.write(trp, linkrev,
2573 mn = mctx.write(trp, linkrev,
2575 p1.manifestnode(), p2.manifestnode(),
2574 p1.manifestnode(), p2.manifestnode(),
2576 added, drop, match=self.narrowmatch())
2575 added, drop, match=self.narrowmatch())
2577 else:
2576 else:
2578 self.ui.debug('reusing manifest from p1 (listed files '
2577 self.ui.debug('reusing manifest from p1 (listed files '
2579 'actually unchanged)\n')
2578 'actually unchanged)\n')
2580 mn = p1.manifestnode()
2579 mn = p1.manifestnode()
2581 else:
2580 else:
2582 self.ui.debug('reusing manifest from p1 (no file change)\n')
2581 self.ui.debug('reusing manifest from p1 (no file change)\n')
2583 mn = p1.manifestnode()
2582 mn = p1.manifestnode()
2584 files = []
2583 files = []
2585
2584
2586 # update changelog
2585 # update changelog
2587 self.ui.note(_("committing changelog\n"))
2586 self.ui.note(_("committing changelog\n"))
2588 self.changelog.delayupdate(tr)
2587 self.changelog.delayupdate(tr)
2589 n = self.changelog.add(mn, files, ctx.description(),
2588 n = self.changelog.add(mn, files, ctx.description(),
2590 trp, p1.node(), p2.node(),
2589 trp, p1.node(), p2.node(),
2591 user, ctx.date(), ctx.extra().copy())
2590 user, ctx.date(), ctx.extra().copy())
2592 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2591 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2593 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2592 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2594 parent2=xp2)
2593 parent2=xp2)
2595 # set the new commit in its proper phase
2594 # set the new commit in its proper phase
2596 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2595 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2597 if targetphase:
2596 if targetphase:
2598 # retracting the boundary does not alter the parent changeset.
2597 # retracting the boundary does not alter the parent changeset.
2599 # if a parent has a higher phase, the resulting phase will
2598 # if a parent has a higher phase, the resulting phase will
2600 # be compliant anyway
2599 # be compliant anyway
2601 #
2600 #
2602 # if minimal phase was 0 we don't need to retract anything
2601 # if minimal phase was 0 we don't need to retract anything
2603 phases.registernew(self, tr, targetphase, [n])
2602 phases.registernew(self, tr, targetphase, [n])
2604 return n
2603 return n
2605
2604
2606 @unfilteredmethod
2605 @unfilteredmethod
2607 def destroying(self):
2606 def destroying(self):
2608 '''Inform the repository that nodes are about to be destroyed.
2607 '''Inform the repository that nodes are about to be destroyed.
2609 Intended for use by strip and rollback, so there's a common
2608 Intended for use by strip and rollback, so there's a common
2610 place for anything that has to be done before destroying history.
2609 place for anything that has to be done before destroying history.
2611
2610
2612 This is mostly useful for saving state that is in memory and waiting
2611 This is mostly useful for saving state that is in memory and waiting
2613 to be flushed when the current lock is released. Because a call to
2612 to be flushed when the current lock is released. Because a call to
2614 destroyed is imminent, the repo will be invalidated causing those
2613 destroyed is imminent, the repo will be invalidated causing those
2615 changes to stay in memory (waiting for the next unlock), or vanish
2614 changes to stay in memory (waiting for the next unlock), or vanish
2616 completely.
2615 completely.
2617 '''
2616 '''
2618 # When using the same lock to commit and strip, the phasecache is left
2617 # When using the same lock to commit and strip, the phasecache is left
2619 # dirty after committing. Then when we strip, the repo is invalidated,
2618 # dirty after committing. Then when we strip, the repo is invalidated,
2620 # causing those changes to disappear.
2619 # causing those changes to disappear.
2621 if '_phasecache' in vars(self):
2620 if '_phasecache' in vars(self):
        self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]
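    # Illustrative sketch (not part of this changeset): how an extension
    # might register a post-dirstate-status callback. The callback below is
    # hypothetical; addpostdsstatus() is the real entry point.
    #
    #     def _logmodified(wctx, status):
    #         # runs under wlock when status fixups happen; re-read the
    #         # dirstate through wctx instead of capturing a reference
    #         for f in status.modified:
    #             wctx.repo().ui.debug(b'modified: %s\n' % f)
    #
    #     repo.addpostdsstatus(_logmodified)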

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
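    # Illustrative sketch (not part of this changeset): typical caller-side
    # use of branchheads(); the branch name is hypothetical.
    #
    #     for node in repo.branchheads(b'default', closed=True):
    #         repo.ui.write(b'%s\n' % repo[node].hex())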

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk first parents from top towards bottom, recording the
            # nodes seen at exponentially growing distances (1, 2, 4, ...)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
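    # Illustrative sketch (not part of this changeset): for a linear history
    # the loop above samples nodes 1, 2, 4, 8, ... first-parent steps below
    # ``top``, keeping the answer O(log n) long, which is what the legacy
    # discovery protocol expects. The helper below is hypothetical.
    #
    #     def sampleddistances(length):
    #         d, out = 1, []
    #         while d < length:
    #             out.append(d)
    #             d *= 2
    #         return out
    #
    #     sampleddistances(10)  # -> [1, 2, 4, 8]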

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (carrying repo, remote and outgoing) before changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
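    # Illustrative sketch (not part of this changeset): an extension adding
    # a pre-push check. The callback and its limit are hypothetical;
    # prepushoutgoinghooks and util.hooks.add() are the real APIs.
    #
    #     def _checkoutgoing(pushop):
    #         if len(pushop.outgoing.missing) > 1000:
    #             raise error.Abort(b'refusing to push >1000 changesets')
    #
    #     repo.prepushoutgoinghooks.add(b'myext', _checkoutgoing)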

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    return requirements
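# Illustrative sketch (not part of this changeset): requirements produced
# for a stock configuration. The exact set depends on the ui config; with
# shipped defaults it would typically look like:
#
#     reqs = newreporequirements(ui, defaultcreateopts(ui))
#     # e.g. {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
#     # (possibly plus 'sparse-revlog', depending on format.* defaults)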

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
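# Illustrative sketch (not part of this changeset): an extension claiming a
# custom creation option. The option name and wrapper are hypothetical;
# extensions.wrapfunction() is the real API.
#
#     def _filterknown(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop('myext-special-store', None)  # we handle this one
#         return unknown
#
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filterknown)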

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
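# Illustrative sketch (not part of this changeset): creating a repository
# with an extra requirement from code. The path is hypothetical.
#
#     from mercurial import localrepo, ui as uimod
#
#     ui = uimod.ui.load()
#     localrepo.createrepository(ui, b'/tmp/demo-repo',
#                                createopts={'lfs': True})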

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
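# Illustrative sketch (not part of this changeset): after poisoning, every
# attribute access except close() fails loudly, so stale references surface.
#
#     poisonrepository(repo)
#     repo.close()        # still allowed, does nothing
#     repo.changelog      # raises error.ProgrammingError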
@@ -1,228 +1,229 @@
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from . import (
+    branchmap,
    changelog,
    error,
    localrepo,
    manifest,
    namespaces,
    pathutil,
    pycompat,
    url,
    util,
    vfs as vfsmod,
)

urlerr = util.urlerr
urlreq = util.urlreq

class httprangereader(object):
    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def seek(self, pos):
        self.pos = pos
    def read(self, bytes=None):
        req = urlreq.request(pycompat.strurl(self.url))
        end = ''
        if bytes:
            end = self.pos + bytes - 1
        if self.pos or end:
            req.add_header(r'Range', r'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
            code = f.code
        except urlerr.httperror as inst:
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urlerr.urlerror as inst:
            raise IOError(None, inst.reason)

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos:self.pos + bytes]
            else:
                data = data[self.pos:]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data
    def readlines(self):
        return self.read().splitlines(True)
    def __iter__(self):
        return iter(self.readlines())
    def close(self):
        pass
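# Illustrative sketch (not part of this changeset): the same Range-request
# arithmetic shown with the standard library directly (Python 3 urllib
# rather than Mercurial's urlreq compatibility layer; URL is hypothetical).
#
#     import urllib.request
#
#     def readrange(url, pos, nbytes):
#         req = urllib.request.Request(url)
#         # Range is inclusive on both ends, hence the -1.
#         req.add_header('Range', 'bytes=%d-%d' % (pos, pos + nbytes - 1))
#         with urllib.request.urlopen(req) as f:
#             code = f.getcode()
#             data = f.read()
#         # A 200 answer means the server ignored Range; slice locally.
#         if code == 200:
#             data = data[pos:pos + nbytes]
#         return data[:nbytes]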

# _RangeError and _HTTPRangeHandler were originally in byterange.py,
# which was itself extracted from urlgrabber. See the last version of
# byterange.py from history if you need more information.
class _RangeError(IOError):
    """Error raised when an unsatisfiable range is requested."""

class _HTTPRangeHandler(urlreq.basehandler):
    """Handler that enables HTTP Range headers.

    This is extremely simple. The Range header is an HTTP feature to
    begin with so all this class does is tell urllib2 that the
    "206 Partial Content" response from the HTTP server is what we
    expected.
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content Response
        r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
        r.code = code
        r.msg = msg
        return r

    def http_error_416(self, req, fp, code, msg, hdrs):
        # HTTP's Range Not Satisfiable error
        raise _RangeError('Requested Range Not Satisfiable')

def build_opener(ui, authinfo):
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(_HTTPRangeHandler())

    class statichttpvfs(vfsmod.abstractvfs):
        def __init__(self, base):
            self.base = base

        def __call__(self, path, mode='r', *args, **kw):
            if mode not in ('r', 'rb'):
                raise IOError('Permission denied')
            f = "/".join((self.base, urlreq.quote(path)))
            return httprangereader(f, urlopener)

        def join(self, path):
            if path:
                return pathutil.join(self.base, path)
            else:
                return self.base

    return statichttpvfs
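# Illustrative sketch (not part of this changeset): build_opener() returns a
# vfs *class* closed over the opener, so every file handle shares one
# authenticated, Range-capable opener. Hypothetical usage:
#
#     vfsclass = build_opener(ui, authinfo)
#     vfs = vfsclass('http://example.com/repo/.hg')
#     with vfs('requires') as fp:
#         requirements = fp.read().splitlines()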

class statichttppeer(localrepo.localpeer):
    def local(self):
        return None
    def canpush(self):
        return False

class statichttprepository(localrepo.localrepository,
                           localrepo.revlogfilestorage):
    supported = localrepo.localrepository._basesupported

    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        u = util.url(path.rstrip('/') + "/.hg")
        self.path, authinfo = u.authinfo()

        vfsclass = build_opener(ui, authinfo)
        self.vfs = vfsclass(self.path)
        self.cachevfs = vfsclass(self.vfs.join('cache'))
        self._phasedefaults = []

        self.names = namespaces.namespaces()
        self.filtername = None

        try:
            requirements = set(self.vfs.read(b'requires').splitlines())
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = set()

            # check if it is a non-empty old-style repository
            try:
                fp = self.vfs("00changelog.i")
                fp.read(1)
                fp.close()
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # we do not care about empty old-style repositories here
                msg = _("'%s' does not appear to be an hg repository") % path
                raise error.RepoError(msg)

        supportedrequirements = localrepo.gathersupportedrequirements(ui)
        localrepo.ensurerequirementsrecognized(requirements,
                                               supportedrequirements)
        localrepo.ensurerequirementscompatible(ui, requirements)

        # setup store
        self.store = localrepo.makestore(requirements, self.path, vfsclass)
        self.spath = self.store.path
        self.svfs = self.store.opener
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

        rootmanifest = manifest.manifestrevlog(self.svfs)
        self.manifestlog = manifest.manifestlog(self.svfs, self, rootmanifest,
                                                self.narrowmatch())
        self.changelog = changelog.changelog(self.svfs)
        self._tags = None
        self.nodetagscache = None
-        self._branchcaches = {}
+        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self.encodepats = None
        self.decodepats = None
        self._transref = None

    def _restrictcapabilities(self, caps):
        caps = super(statichttprepository, self)._restrictcapabilities(caps)
        return caps.difference(["pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self):
        return statichttppeer(self)

    def wlock(self, wait=True):
        raise error.LockUnavailable(0, _('lock not available'), 'lock',
                                    _('cannot lock static-http repository'))

    def lock(self, wait=True):
        raise error.Abort(_('cannot lock static-http repository'))

    def _writecaches(self):
        pass # statichttprepository is read-only

def instance(ui, path, create, intents=None, createopts=None):
    if create:
        raise error.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
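# Illustrative sketch (not part of this changeset): static-http repos are
# addressed with a 'static-http://' URL; instance() strips the 'static-'
# prefix (path[7:]) and serves everything over plain HTTP range requests.
# Hypothetical command line:
#
#     hg clone static-http://example.com/some/repo local-copy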
@@ -1,659 +1,658 @@
# streamclone.py - producing and consuming streaming repository data
#
# Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import contextlib
import os
import struct

from .i18n import _
from . import (
-    branchmap,
    cacheutil,
    error,
    narrowspec,
    phases,
    pycompat,
    repository,
    store,
    util,
)

def canperformstreamclone(pullop, bundle2=False):
    """Whether it is possible to perform a streaming clone as part of pull.

    ``bundle2`` will cause the function to consider stream clone through
    bundle2 and only through bundle2.

    Returns a tuple of (supported, requirements). ``supported`` is True if
    streaming clone is supported and False otherwise. ``requirements`` is
    a set of repo requirements from the remote, or ``None`` if stream clone
    isn't supported.
    """
    repo = pullop.repo
    remote = pullop.remote

    bundle2supported = False
    if pullop.canusebundle2:
        if 'v2' in pullop.remotebundle2caps.get('stream', []):
            bundle2supported = True
        # else
            # Server doesn't support bundle2 stream clone or doesn't support
            # the versions we support. Fall back and possibly allow legacy.

    # Ensures legacy code path uses available bundle2.
    if bundle2supported and not bundle2:
        return False, None
    # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
    elif bundle2 and not bundle2supported:
        return False, None

    # Streaming clone only works on empty repositories.
    if len(repo):
        return False, None

    # Streaming clone only works if all data is being requested.
    if pullop.heads:
        return False, None

    streamrequested = pullop.streamclonerequested

    # If we don't have a preference, let the server decide for us. This
    # likely only comes into play in LANs.
    if streamrequested is None:
        # The server can advertise whether to prefer streaming clone.
        streamrequested = remote.capable('stream-preferred')

    if not streamrequested:
        return False, None

    # In order for stream clone to work, the client has to support all the
    # requirements advertised by the server.
    #
    # The server advertises its requirements via the "stream" and "streamreqs"
    # capability. "stream" (a value-less capability) is advertised if and only
    # if the only requirement is "revlogv1." Else, the "streamreqs" capability
    # is advertised and contains a comma-delimited list of requirements.
    requirements = set()
    if remote.capable('stream'):
        requirements.add('revlogv1')
    else:
        streamreqs = remote.capable('streamreqs')
        # This is weird and shouldn't happen with modern servers.
        if not streamreqs:
            pullop.repo.ui.warn(_(
                'warning: stream clone requested but server has them '
                'disabled\n'))
            return False, None

        streamreqs = set(streamreqs.split(','))
        # Server requires something we don't support. Bail.
        missingreqs = streamreqs - repo.supportedformats
        if missingreqs:
            pullop.repo.ui.warn(_(
                'warning: stream clone requested but client is missing '
                'requirements: %s\n') % ', '.join(sorted(missingreqs)))
            pullop.repo.ui.warn(
                _('(see https://www.mercurial-scm.org/wiki/MissingRequirement '
                  'for more information)\n'))
            return False, None
        requirements = streamreqs

    return True, requirements

def maybeperformlegacystreamclone(pullop):
    """Possibly perform a legacy stream clone operation.

    Legacy stream clones are performed as part of pull but before all other
    operations.

    A legacy stream clone will not be performed if a bundle2 stream clone is
    supported.
    """
    from . import localrepo

    supported, requirements = canperformstreamclone(pullop)

    if not supported:
        return

    repo = pullop.repo
    remote = pullop.remote

    # Save remote branchmap. We will use it later to speed up branchcache
    # creation.
    rbranchmap = None
    if remote.capable('branchmap'):
        with remote.commandexecutor() as e:
            rbranchmap = e.callcommand('branchmap', {}).result()

    repo.ui.status(_('streaming all changes\n'))

    with remote.commandexecutor() as e:
        fp = e.callcommand('stream_out', {}).result()

    # TODO strictly speaking, this code should all be inside the context
    # manager because the context manager is supposed to ensure all wire state
    # is flushed when exiting. But the legacy peers don't do this, so it
    # doesn't matter.
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)
    if resp == 1:
        raise error.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise error.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise error.Abort(_('the server sent an unknown error code'))

    l = fp.readline()
    try:
        filecount, bytecount = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)

    with repo.lock():
        consumev1(repo, fp, filecount, bytecount)

        # new requirements = old non-format requirements +
        #                    new format-related remote requirements
        # requirements from the streamed-in repository
        repo.requirements = requirements | (
            repo.requirements - repo.supportedformats)
        repo.svfs.options = localrepo.resolvestorevfsoptions(
            repo.ui, repo.requirements, repo.features)
        repo._writerequirements()

        if rbranchmap:
-            branchmap.replacecache(repo, rbranchmap)
+            repo._branchcaches.replace(repo, rbranchmap)

        repo.invalidate()
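# Illustrative sketch (not part of this file): the shape of the API this
# changeset moves to. The body below is a simplified guess at the idea --
# a per-filter mapping of branch caches that knows how to replace itself
# from a remote branchmap -- not the actual implementation that lives in
# mercurial/branchmap.py.
#
#     class BranchMapCache(object):
#         def __init__(self):
#             self._per_filter = {}
#
#         def replace(self, repo, remotebranchmap):
#             # rebuild the unfiltered cache from the remote's view and
#             # drop any stale filtered variants
#             ...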

def allowservergeneration(repo):
    """Whether streaming clones are allowed from the server."""
    if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
        return False

    if not repo.ui.configbool('server', 'uncompressed', untrusted=True):
        return False

    # The way stream clone works makes it impossible to hide secret changesets.
    # So don't allow this by default.
    secret = phases.hassecret(repo)
    if secret:
        return repo.ui.configbool('server', 'uncompressedallowsecret')

    return True

# This is its own function so extensions can override it.
def _walkstreamfiles(repo, matcher=None):
    return repo.store.walk(matcher)
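# Illustrative sketch (not part of this changeset): an extension overriding
# the walk, e.g. to skip hypothetical sidecar files. The filtering rule is
# made up; extensions.wrapfunction() is the real API.
#
#     def _filteredwalk(orig, repo, matcher=None):
#         for name, ename, size in orig(repo, matcher):
#             if not name.endswith(b'.sidecar'):
#                 yield name, ename, size
#
#     extensions.wrapfunction(streamclone, '_walkstreamfiles', _filteredwalk)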

def generatev1(repo):
    """Emit content for version 1 of a streaming clone.

    This returns a 3-tuple of (file count, byte size, data iterator).

    The data iterator consists of N entries for each file being transferred.
    Each file entry starts as a line with the file name and integer size
    delimited by a null byte.

    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    with repo.lock():
        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            if size:
                entries.append((name, size))
                total_bytes += size

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))

    svfs = repo.svfs
    debugflag = repo.ui.debugflag

    def emitrevlogdata():
        for name, size in entries:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield '%s\0%d\n' % (store.encodedir(name), size)
            # auditing at this stage is both pointless (paths are already
            # trusted by the local repo) and expensive
            with svfs(name, 'rb', auditpath=False) as fp:
                if size <= 65536:
                    yield fp.read(size)
                else:
                    for chunk in util.filechunkiter(fp, limit=size):
                        yield chunk

    return len(entries), total_bytes, emitrevlogdata()
252
251
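# Illustrative sketch only: materializing the version 1 stream described
# above into a plain file. The function and path are hypothetical helpers,
# not part of this module.
def _examplewritev1(repo, path):
    filecount, bytecount, it = generatev1(repo)
    repo.ui.debug('writing %d files / %d bytes\n' % (filecount, bytecount))
    with open(path, 'wb') as fh:
        for chunk in it:
            fh.write(chunk)
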
def generatev1wireproto(repo):
    """Emit content for version 1 of streaming clone suitable for the wire.

    This is the data output from ``generatev1()`` with 2 header lines. The
    first line indicates overall success. The 2nd contains the file count and
    byte size of payload.

    The success line contains "0" for success, "1" for stream generation not
    allowed, and "2" for error locking the repository (possibly indicating
    a permissions error for the server process).
    """
    if not allowservergeneration(repo):
        yield '1\n'
        return

    try:
        filecount, bytecount, it = generatev1(repo)
    except error.LockError:
        yield '2\n'
        return

    # Indicates successful response.
    yield '0\n'
    yield '%d %d\n' % (filecount, bytecount)
    for chunk in it:
        yield chunk

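# Illustrative sketch only: how a client might interpret the success line
# documented above before consuming the payload. _examplereadv1status is a
# hypothetical helper; 'fp' is any file-like object wrapping the response.
def _examplereadv1status(fp):
    status = fp.readline().strip()
    if status == '1':
        raise error.Abort(_('streaming clones not allowed by server'))
    if status == '2':
        raise error.Abort(_('server could not lock its repository'))
    # "0": success; the next line carries "<filecount> <bytecount>"
    filecount, bytecount = map(int, fp.readline().split())
    return filecount, bytecount
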
def generatebundlev1(repo, compression='UN'):
    """Emit content for version 1 of a stream clone bundle.

    The first 4 bytes of the output ("HGS1") denote this as stream clone
    bundle version 1.

    The next 2 bytes indicate the compression type. Only "UN" is currently
    supported.

    The next 16 bytes are two 64-bit big endian unsigned integers indicating
    file count and byte count, respectively.

    The next 2 bytes is a 16-bit big endian unsigned short declaring the length
    of the requirements string, including a trailing \0. The following N bytes
    are the requirements string, which is ASCII containing a comma-delimited
    list of repo requirements that are needed to support the data.

    The remaining content is the output of ``generatev1()`` (which may be
    compressed in the future).

    Returns a tuple of (requirements, data generator).
    """
    if compression != 'UN':
        raise ValueError('we do not support the compression argument yet')

    requirements = repo.requirements & repo.supportedformats
    requires = ','.join(sorted(requirements))

    def gen():
        yield 'HGS1'
        yield compression

        filecount, bytecount, it = generatev1(repo)
        repo.ui.status(_('writing %d bytes for %d files\n') %
                       (bytecount, filecount))

        yield struct.pack('>QQ', filecount, bytecount)
        yield struct.pack('>H', len(requires) + 1)
        yield requires + '\0'

        # This is where we'll add compression in the future.
        assert compression == 'UN'

        progress = repo.ui.makeprogress(_('bundle'), total=bytecount,
                                        unit=_('bytes'))
        progress.update(0)

        for chunk in it:
            progress.increment(step=len(chunk))
            yield chunk

        progress.complete()

    return requirements, gen()

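# Illustrative sketch only: the fixed header produced by the generator
# above, assembled by hand for a hypothetical payload (3 files, 12345
# bytes, a single 'revlogv1' requirement).
def _examplebundlev1header():
    requires = 'revlogv1'
    header = 'HGS1'                                 # 4 byte magic
    header += 'UN'                                  # 2 byte compression type
    header += struct.pack('>QQ', 3, 12345)          # file and byte counts
    header += struct.pack('>H', len(requires) + 1)  # requirements length
    header += requires + '\0'                       # requirements + NUL
    return header
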
def consumev1(repo, fp, filecount, bytecount):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from "stream_out" and applies it to the specified
    repository.

    Like "stream_out," the status line added by the wire protocol is not
    handled by this function.
    """
    with repo.lock():
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(bytecount)))
        progress = repo.ui.makeprogress(_('clone'), total=bytecount,
                                        unit=_('bytes'))
        progress.update(0)
        start = util.timer()

        # TODO: get rid of (potential) inconsistency
        #
        # If transaction is started and any @filecache property is
        # changed at this point, it causes inconsistency between
        # in-memory cached property and streamclone-ed file on the
        # disk. Nested transaction prevents transaction scope "clone"
        # below from writing in-memory changes out at the end of it,
        # even though in-memory changes are discarded at the end of it
        # regardless of transaction nesting.
        #
        # But transaction nesting can't be simply prohibited, because
        # nesting occurs also in ordinary case (e.g. enabling
        # clonebundles).

        with repo.transaction('clone'):
            with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                for i in pycompat.xrange(filecount):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if repo.ui.debugflag:
                        repo.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    path = store.decodedir(name)
                    with repo.svfs(path, 'w', backgroundclose=True) as ofp:
                        for chunk in util.filechunkiter(fp, limit=size):
                            progress.increment(step=len(chunk))
                            ofp.write(chunk)

        # force @filecache properties to be reloaded from
        # streamclone-ed file at next access
        repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        if elapsed <= 0:
            elapsed = 0.001
        progress.complete()
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(bytecount), elapsed,
                        util.bytecount(bytecount / elapsed)))

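# Illustrative sketch only: decoding one 'name\0size\n' entry header of the
# v1 format, mirroring the split and decodedir() calls in consumev1() above.
# _exampleparsev1entry is a hypothetical helper.
def _exampleparsev1entry(line):
    name, size = line.split('\0', 1)
    return store.decodedir(name), int(size)
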
def readbundle1header(fp):
    compression = fp.read(2)
    if compression != 'UN':
        raise error.Abort(_('only uncompressed stream clone bundles are '
                            'supported; got %s') % compression)

    filecount, bytecount = struct.unpack('>QQ', fp.read(16))
    requireslen = struct.unpack('>H', fp.read(2))[0]
    requires = fp.read(requireslen)

    if not requires.endswith('\0'):
        raise error.Abort(_('malformed stream clone bundle: '
                            'requirements not properly encoded'))

    requirements = set(requires.rstrip('\0').split(','))

    return filecount, bytecount, requirements

def applybundlev1(repo, fp):
    """Apply the content from a stream clone bundle version 1.

    We assume the 4 byte header has been read and validated and the file handle
    is at the 2 byte compression identifier.
    """
    if len(repo):
        raise error.Abort(_('cannot apply stream clone bundle on non-empty '
                            'repo'))

    filecount, bytecount, requirements = readbundle1header(fp)
    missingreqs = requirements - repo.supportedformats
    if missingreqs:
        raise error.Abort(_('unable to apply stream clone: '
                            'unsupported format: %s') %
                          ', '.join(sorted(missingreqs)))

    consumev1(repo, fp, filecount, bytecount)

class streamcloneapplier(object):
    """Class to manage applying streaming clone bundles.

    We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
    readers to perform bundle type-specific functionality.
    """
    def __init__(self, fh):
        self._fh = fh

    def apply(self, repo):
        return applybundlev1(repo, self._fh)

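# Illustrative sketch only: a bundle reader validates the 4 byte magic
# itself, then hands the file object (now positioned at the compression
# identifier) to the applier. Names below are hypothetical.
def _exampleapplystreambundle(repo, fh):
    if fh.read(4) != 'HGS1':
        raise error.Abort(_('not a stream clone bundle v1'))
    streamcloneapplier(fh).apply(repo)
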
# type of file to stream
_fileappend = 0 # append only file
_filefull = 1 # full snapshot file

# Source of the file
_srcstore = 's' # store (svfs)
_srccache = 'c' # cache (cache)

# This is its own function so extensions can override it.
def _walkstreamfullstorefiles(repo):
    """list snapshot files from the store"""
    fnames = []
    if not repo.publishing():
        fnames.append('phaseroots')
    return fnames

def _filterfull(entry, copy, vfsmap):
    """actually copy the snapshot files"""
    src, name, ftype, data = entry
    if ftype != _filefull:
        return entry
    return (src, name, ftype, copy(vfsmap[src].join(name)))

@contextlib.contextmanager
def maketempcopies():
    """return a function to temporarily copy files"""
    files = []
    try:
        def copy(src):
            fd, dst = pycompat.mkstemp()
            os.close(fd)
            files.append(dst)
            util.copyfiles(src, dst, hardlink=True)
            return dst
        yield copy
    finally:
        for tmp in files:
            util.tryunlink(tmp)

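# Illustrative sketch only: the context manager above hands out a copy
# function and unlinks every temporary copy on exit. The phaseroots
# snapshot below is a hypothetical use.
def _examplesnapshotphaseroots(repo):
    with maketempcopies() as copy:
        tmppath = copy(repo.svfs.join('phaseroots'))
        return util.readfile(tmppath)  # read before the copy is removed
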
def _makemap(repo):
    """make a (src -> vfs) map for the repo"""
    vfsmap = {
        _srcstore: repo.svfs,
        _srccache: repo.cachevfs,
    }
    # we keep repo.vfs out of the map on purpose: there are too many dangers
    # there (eg: .hg/hgrc)
    assert repo.vfs not in vfsmap.values()

    return vfsmap

def _emit2(repo, entries, totalfilesize):
    """actually emit the stream bundle"""
    vfsmap = _makemap(repo)
    progress = repo.ui.makeprogress(_('bundle'), total=totalfilesize,
                                    unit=_('bytes'))
    progress.update(0)
    with maketempcopies() as copy, progress:
        # copy is delayed until we are in the try
        entries = [_filterfull(e, copy, vfsmap) for e in entries]
        yield None # this releases the lock on the repository
        seen = 0

        for src, name, ftype, data in entries:
            vfs = vfsmap[src]
            yield src
            yield util.uvarintencode(len(name))
            if ftype == _fileappend:
                fp = vfs(name)
                size = data
            elif ftype == _filefull:
                fp = open(data, 'rb')
                size = util.fstat(fp).st_size
            try:
                yield util.uvarintencode(size)
                yield name
                if size <= 65536:
                    chunks = (fp.read(size),)
                else:
                    chunks = util.filechunkiter(fp, limit=size)
                for chunk in chunks:
                    seen += len(chunk)
                    progress.update(seen)
                    yield chunk
            finally:
                fp.close()

def generatev2(repo, includes, excludes, includeobsmarkers):
    """Emit content for version 2 of a streaming clone.

    The data stream consists of the following entries:
    1) A char representing the file destination (eg: store or cache)
    2) A varint containing the length of the filename
    3) A varint containing the length of file data
    4) N bytes containing the filename (the internal, store-agnostic form)
    5) N bytes containing the file data

    Returns a 3-tuple of (file count, file size, data iterator).
    """

    with repo.lock():

        entries = []
        totalfilesize = 0

        matcher = None
        if includes or excludes:
            matcher = narrowspec.match(repo.root, includes, excludes)

        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo, matcher):
            if size:
                entries.append((_srcstore, name, _fileappend, size))
                totalfilesize += size
        for name in _walkstreamfullstorefiles(repo):
            if repo.svfs.exists(name):
                totalfilesize += repo.svfs.lstat(name).st_size
                entries.append((_srcstore, name, _filefull, None))
        if includeobsmarkers and repo.svfs.exists('obsstore'):
            totalfilesize += repo.svfs.lstat('obsstore').st_size
            entries.append((_srcstore, 'obsstore', _filefull, None))
        for name in cacheutil.cachetocopy(repo):
            if repo.cachevfs.exists(name):
                totalfilesize += repo.cachevfs.lstat(name).st_size
                entries.append((_srccache, name, _filefull, None))

        chunks = _emit2(repo, entries, totalfilesize)
        first = next(chunks)
        assert first is None

    return len(entries), totalfilesize, chunks

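# Illustrative sketch only: decoding a single entry of the v2 format listed
# in the docstring above, mirroring the reads performed by consumev2()
# below. _examplereadv2entry is a hypothetical helper.
def _examplereadv2entry(fp):
    src = util.readexactly(fp, 1)           # 1) destination char ('s'/'c')
    namelen = util.uvarintdecodestream(fp)  # 2) varint filename length
    datalen = util.uvarintdecodestream(fp)  # 3) varint data length
    name = util.readexactly(fp, namelen)    # 4) filename
    data = util.readexactly(fp, datalen)    # 5) file data
    return src, name, data
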
@contextlib.contextmanager
def nested(*ctxs):
    this = ctxs[0]
    rest = ctxs[1:]
    with this:
        if rest:
            with nested(*rest):
                yield
        else:
            yield

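# Illustrative sketch only: nested() enters its arguments left to right and
# releases them in reverse order, which lets consumev2() below hold an
# arbitrary number of backgroundclosing contexts at once. For example:
def _examplenested(repo):
    with nested(repo.lock(), repo.transaction('example')):
        pass  # both contexts are active here; 'example' is a made-up name
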
def consumev2(repo, fp, filecount, filesize):
    """Apply the contents from a version 2 streaming clone.

    Data is read from an object that only needs to provide a ``read(size)``
    method.
    """
    with repo.lock():
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(filesize)))

        start = util.timer()
        progress = repo.ui.makeprogress(_('clone'), total=filesize,
                                        unit=_('bytes'))
        progress.update(0)

        vfsmap = _makemap(repo)

        with repo.transaction('clone'):
            ctxs = (vfs.backgroundclosing(repo.ui)
                    for vfs in vfsmap.values())
            with nested(*ctxs):
                for i in range(filecount):
                    src = util.readexactly(fp, 1)
                    vfs = vfsmap[src]
                    namelen = util.uvarintdecodestream(fp)
                    datalen = util.uvarintdecodestream(fp)

                    name = util.readexactly(fp, namelen)

                    if repo.ui.debugflag:
                        repo.ui.debug('adding [%s] %s (%s)\n' %
                                      (src, name, util.bytecount(datalen)))

                    with vfs(name, 'w') as ofp:
                        for chunk in util.filechunkiter(fp, limit=datalen):
                            progress.increment(step=len(chunk))
                            ofp.write(chunk)

            # force @filecache properties to be reloaded from
            # streamclone-ed file at next access
            repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(progress.pos), elapsed,
                        util.bytecount(progress.pos / elapsed)))
        progress.complete()

def applybundlev2(repo, fp, filecount, filesize, requirements):
    from . import localrepo

    missingreqs = [r for r in requirements if r not in repo.supported]
    if missingreqs:
        raise error.Abort(_('unable to apply stream clone: '
                            'unsupported format: %s') %
                          ', '.join(sorted(missingreqs)))

    consumev2(repo, fp, filecount, filesize)

    # new requirements = old non-format requirements +
    #                    new format-related remote requirements
    # requirements from the streamed-in repository
    repo.requirements = set(requirements) | (
            repo.requirements - repo.supportedformats)
    repo.svfs.options = localrepo.resolvestorevfsoptions(
        repo.ui, repo.requirements, repo.features)
    repo._writerequirements()

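# Illustrative sketch only: the set arithmetic above with made-up values.
# Format requirements follow the streamed-in data; everything outside
# supportedformats (e.g. 'shared') is preserved from the local repo.
def _examplerequirementsmath():
    localreqs = set(['shared', 'revlogv1'])
    remotereqs = set(['revlogv1', 'generaldelta'])
    supportedformats = set(['revlogv1', 'generaldelta'])
    return remotereqs | (localreqs - supportedformats)
    # -> set(['shared', 'revlogv1', 'generaldelta'])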