localrepo: pass root manifest into manifestlog.__init__...
Gregory Szorc
r39799:5ccd7913 default
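
This revision changes how perftags() rebuilds repo.manifestlog: manifestlog.__init__ now receives the root manifest revlog from the caller instead of constructing it internally. A minimal before/after sketch, assuming the mercurial.manifest API as of this revision, with svfs and repo in scope as in perftags():

    # before: manifestlog constructed the root manifest storage internally
    repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)

    # after: the caller builds the root manifest revlog and passes it in
    rootmanifest = mercurial.manifest.manifestrevlog(svfs)
    repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo, rootmanifest)
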
@@ -1,2024 +1,2026 @@
# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import functools
import gc
import os
import random
import struct
import sys
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, attr, _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         (b'c', b'changelog', False, (b'open changelog')),
                         (b'm', b'manifest', False, (b'open manifest')),
                         (b'', b'dir', False, (b'open directory manifest')),
                     ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.lstrip(b"^").split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
               default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
               default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm

def stub_timer(fm, func, title=None):
    func()

def _timer(fm, func, title=None, displayall=False):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        ostart = os.times()
        cstart = util.timer()
        r = func()
        cstop = util.timer()
        ostop = os.times()
        count += 1
        a, b = ostart, ostop
        results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if r:
        fm.write(b'result', b'! result: %s\n', r)
    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %d)', role, count)
        fm.plain(b'\n')
    results.sort()
    min_val = results[0]
    display(b'best', min_val)
    if displayall:
        max_val = results[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*results)])
        display(b'avg', avg)
        median = results[len(results) // 2]
        display(b'median', median)

# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, name)
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, name, newvalue)
        def restore(self):
            setattr(obj, name, origvalue)

    return attrutil()

# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(repo, attrname):
    unfi = repo.unfiltered()
    if attrname in vars(unfi):
        delattr(unfi, attrname)
    unfi._filecache.pop(attrname, None)

# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perftags', formatteropts)
def perftags(ui, repo, **opts):
    import mercurial.changelog
    import mercurial.manifest
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    repocleartagscache = repocleartagscachefunc(repo)
    def t():
        repo.changelog = mercurial.changelog.changelog(svfs)
-        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)
+        rootmanifest = mercurial.manifest.manifestrevlog(svfs)
+        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
+                                                          rootmanifest)
        repocleartagscache()
        return len(repo.tags())
    timer(t)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfbookmarks', formatteropts)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    timer, fm = gettimer(ui, opts)
    def d():
        clearfilecache(repo, b'_bookmarks')
        repo._bookmarks
    timer(d)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'version', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(version, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()

@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()

@command(b'perfmanifest', [
    (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
    (b'', b'clear-disk', False, b'clear on-disk caches too'),
], b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

885 @command(b'perfchangeset', formatteropts)
887 @command(b'perfchangeset', formatteropts)
886 def perfchangeset(ui, repo, rev, **opts):
888 def perfchangeset(ui, repo, rev, **opts):
887 timer, fm = gettimer(ui, opts)
889 timer, fm = gettimer(ui, opts)
888 n = scmutil.revsingle(repo, rev).node()
890 n = scmutil.revsingle(repo, rev).node()
889 def d():
891 def d():
890 repo.changelog.read(n)
892 repo.changelog.read(n)
891 #repo.changelog._cache = None
893 #repo.changelog._cache = None
892 timer(d)
894 timer(d)
893 fm.end()
895 fm.end()
894
896
895 @command(b'perfindex', formatteropts)
897 @command(b'perfindex', formatteropts)
896 def perfindex(ui, repo, **opts):
898 def perfindex(ui, repo, **opts):
897 import mercurial.revlog
899 import mercurial.revlog
898 timer, fm = gettimer(ui, opts)
900 timer, fm = gettimer(ui, opts)
899 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
901 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
900 n = repo[b"tip"].node()
902 n = repo[b"tip"].node()
901 svfs = getsvfs(repo)
903 svfs = getsvfs(repo)
902 def d():
904 def d():
903 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
905 cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
904 cl.rev(n)
906 cl.rev(n)
905 timer(d)
907 timer(d)
906 fm.end()
908 fm.end()
907
909
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    cmd = sys.argv[0]
    def d():
        # os.name, os.environ and os.system all deal in native strings, so
        # b'' literals would never match (and fail) on Python 3.
        if os.name != 'nt':
            os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
        else:
            os.environ['HGRCPATH'] = ' '
            os.system("%s version -q > NUL" % cmd)
    timer(d)
    fm.end()

@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

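# A sketch of how the experimental perf.parentscount knob above can be set
# from the command line (the count value is illustrative):
#
#   hg perfparents --config perf.parentscount=250
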
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

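# Example invocation (sizes are illustrative; the flag names match the
# command declaration above):
#
#   hg perflinelogedits -n 50000 --max-hunk-lines 20
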
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()

@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()

@command(b'perftemplating',
         [(b'r', b'rev', [], b'revisions to run the template on'),
         ] + formatteropts)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(b"perftemplating not available with this Mercurial",
                          hint=b"use 4.3 or later")

    nullui = ui.copy()
    # open() wants a native string mode, not bytes
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
                       b' {author|person}: {desc|firstline}\n')
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)
    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()

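# Example invocations (the template string and revset are illustrative; when
# no template is given the default one above is timed):
#
#   hg perftemplating
#   hg perftemplating -r '0:1000' '{rev}:{node|short} {desc|firstline}\n'
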
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()

@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()

@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')
    def d():
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()

@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

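# The worker threads below coordinate through three primitives: a queue
# carrying text pairs, with a None sentinel closing each timing round; a
# Condition ('ready') that parks idle workers between rounds; and an Event
# ('done') that tells them to exit for good.
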
def _bdiffworker(q, blocks, xdiff, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()

def _manifestrevision(repo, mnode):
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        store = ml.getstorage(b'')
    else:
        store = ml._revlog

    return store.revision(mnode)

@command(b'perfbdiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
    (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
    (b'', b'blocks', False, b'test computing diffs into blocks'),
    (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = pycompat.byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)
    else:
        q = queue()
        for i in xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in xrange(threads):
            threading.Thread(target=_bdiffworker,
                             args=(q, blocks, xdiff, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()

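# Example invocations (revision numbers and thread counts are illustrative;
# --xdiff requires --blocks, as enforced above):
#
#   hg perfbdiff -c 4000
#   hg perfbdiff -m 4000 --count 50 --threads 4
#   hg perfbdiff 4000 --alldata --blocks --xdiff
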
@command(b'perfunidiff', revlogopts + formatteropts + [
    (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
    (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ], b'-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    # Keyword arguments arrive with native string keys; normalize to bytes
    # keys so the b'...' lookups below work, as perfbdiff does.
    opts = pycompat.byteskwargs(opts)

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    timer, fm = gettimer(ui, opts)
    options = {
        b'w': b'ignore_all_space',
        b'b': b'ignore_space_change',
        b'B': b'ignore_blank_lines',
        }

    for diffopt in (b'', b'w', b'b', b'B', b'wB'):
        opts = dict((options[c], b'1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title)
    fm.end()

@command(b'perfrevlogindex', revlogopts + formatteropts,
         b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         b'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         b'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         b'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         b'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         b'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         b'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

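# Example invocations (per the -c|-m|FILE synopsis above, -c selects the
# changelog index, -m the manifest index; the file path is illustrative):
#
#   hg perfrevlogindex -c
#   hg perfrevlogindex path/to/tracked-file.txt
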
@command(b'perfrevlogrevisions', revlogopts + formatteropts +
         [(b'd', b'dist', 100, b'distance between the revisions'),
          (b's', b'startrev', 0, b'revision to start reading at'),
          (b'', b'reverse', False, b'read in reverse')],
         b'-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev, beginrev
            dist = -1 * dist

        for x in xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

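# Example invocation (values are illustrative: read every 10th manifest
# revision starting at rev 1000, per the -d/-s flags declared above):
#
#   hg perfrevlogrevisions -m -d 10 -s 1000
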
@command(b'perfrevlogchunks', revlogopts + formatteropts +
         [(b'e', b'engines', b'', b'compression engines to use'),
          (b's', b'startrev', 0, b'revision to start at')],
         b'-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        b'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

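# Example invocation (the engine name is illustrative; it must be registered
# in util.compressionengines, e.g. zlib):
#
#   hg perfrevlogchunks -c -e zlib
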
@command(b'perfrevlogrevision', revlogopts + formatteropts +
         [(b'', b'cache', False, b'use caches instead of clearing')],
         b'-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Obtain the raw chunks for that delta chain
    3. Decompress each raw chunk
    4. Apply binary patches to obtain fulltext
    5. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer
        offset = start(chain[0])

        chunks = []
        ladd = chunks.append

        for rev in chain:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(buffer(data, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        segmentforrevs(chain[0], chain[-1])

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    chain = r._deltachain(rev)[0]
    data = segmentforrevs(chain[0], chain[-1])[1]
    rawchunks = getrawchunks(data, chain)
    bins = r._chunks(chain)
    text = str(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
        (lambda: dorawchunks(data, chain), b'rawchunks'),
        (lambda: dodecompress(rawchunks), b'decompress'),
        (lambda: dopatch(text, bins), b'patch'),
        (lambda: dohash(text), b'hash'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

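# Example invocations (the revision is illustrative; --cache keeps revlog
# caches warm between runs so only the first access pays the full cost):
#
#   hg perfrevlogrevision -m 4000
#   hg perfrevlogrevision -m 4000 --cache
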
@command(b'perfrevset',
         [(b'C', b'clear', False, b'clear volatile cache between each call.'),
          (b'', b'contexts', False, b'obtain changectx for each revision')]
         + formatteropts, b"REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on revset execution. The volatile caches hold
    filtered and obsolescence-related data."""
    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr): pass
        else:
            for r in repo.revs(expr): pass
    timer(d)
    fm.end()

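# Example invocations (the revsets are illustrative):
#
#   hg perfrevset 'draft()'
#   hg perfrevset 'heads(all())' --clear --contexts
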
@command(b'perfvolatilesets',
         [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
         ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence."""
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()

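# Example invocation (set names are illustrative; valid names come from
# obsolete.cachefuncs and repoview.filtertable, e.g. 'obsolete' and
# 'visible'):
#
#   hg perfvolatilesets obsolete visible --clear-obsstore
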
@command(b'perfbranchmap',
         [(b'f', b'full', False,
           b'Includes build time of subset'),
          (b'', b'clear-revbranch', False,
           b'purge the revbranch cache between computation'),
         ] + formatteropts)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    branchcacheread = safeattrsetter(branchmap, b'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()

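# Example invocations (filter names are illustrative; they must appear in
# repoview.filtertable, plus the special name 'unfiltered'):
#
#   hg perfbranchmap --full
#   hg perfbranchmap visible served
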
@command(b'perfbranchmapload', [
     (b'f', b'filter', b'', b'Specify repoview filter'),
     (b'', b'list', False, b'List branchmap filter caches'),
    ] + formatteropts)
def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(b'%s - %s\n'
                          % (filtername, util.bytecount(st.st_size)))
        return
    if filter:
        repo = repoview.repoview(repo, filter)
    else:
        repo = repo.unfiltered()
    # try once without timer, the filter may not be cached
    if branchmap.read(repo) is None:
        raise error.Abort(b'No branchmap cached for %s repo'
                          % (filter or b'unfiltered'))
    timer, fm = gettimer(ui, opts)
    timer(lambda: branchmap.read(repo) and None)
    fm.end()

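# Illustrative note, an assumption about the on-disk layout rather than
# something stated in this file: the files enumerated by --list above
# live in .hg/cache and are named 'branch2' for the unfiltered repo and
# 'branch2-<filtername>' for filtered views (e.g. 'branch2-served',
# 'branch2-visible'), which is why partition(b'-') recovers the filter
# name above.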
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()

@command(b'perflrucachedict', formatteropts +
    [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
     (b'', b'mincost', 0, b'smallest cost of items in cache'),
     (b'', b'maxcost', 100, b'maximum cost of items in cache'),
     (b'', b'size', 4, b'size of cache'),
     (b'', b'gets', 10000, b'number of key lookups'),
     (b'', b'sets', 10000, b'number of key sets'),
     (b'', b'mixed', 10000, b'number of mixed mode operations'),
     (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
    norepo=True)
def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
                 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
    def doinit():
        for i in xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in xrange(size):
        values.append(random.randint(0, sys.maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in xrange(sets):
        setseq.append(random.randint(0, sys.maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op,
                         random.randint(0, size * 2),
                         random.choice(costrange)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend([
            (dogetscost, b'gets w/ cost limit'),
            (doinsertscost, b'inserts w/ cost limit'),
            (domixedcost, b'mixed w/ cost limit'),
        ])
    else:
        benches.extend([
            (dogets, b'gets'),
            (doinserts, b'inserts'),
            (dosets, b'sets'),
            (domixed, b'mixed')
        ])

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

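# Illustrative sketch of the util.lrucachedict API exercised above,
# assuming the hg 4.8-era interface (d[k] = v for plain sets,
# d.insert(k, v, cost=...) when a cost limit is in play). Least recently
# used entries are evicted once the size (or total cost) is exceeded:
#
#     d = util.lrucachedict(2)
#     d[b'a'] = 1
#     d[b'b'] = 2
#     d[b'a']             # touch 'a' so it becomes most recently used
#     d[b'c'] = 3         # evicts 'b', the least recently used entry
#     b'b' in d           # -> False
#
#     d = util.lrucachedict(4, maxcost=10)
#     d.insert(b'x', 1, cost=6)
#     d.insert(b'y', 2, cost=6)  # total cost 12 > 10, so 'x' is evicted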
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    timer, fm = gettimer(ui, opts)
    def write():
        for i in range(100000):
            ui.write((b'Testing write performance\n'))
    timer(write)
    fm.end()

def uisetup(ui):
    if (util.safehasattr(cmdutil, b'openrevlog') and
        not util.safehasattr(commands, b'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(b"This version doesn't support --dir option",
                                  hint=b"use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
@@ -1,657 +1,659 @@
# bundlerepo.py - repository class for viewing uncompressed bundles
#
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Repository class for viewing uncompressed bundles.

This provides a read-only repository interface to bundles as if they
were part of the actual repository.
"""

from __future__ import absolute_import

import os
import shutil

from .i18n import _
from .node import nullid

from . import (
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    discovery,
    error,
    exchange,
    filelog,
    localrepo,
    manifest,
    mdiff,
    node as nodemod,
    pathutil,
    phases,
    pycompat,
    revlog,
    util,
    vfs as vfsmod,
)

class bundlerevlog(revlog.revlog):
    def __init__(self, opener, indexfile, cgunpacker, linkmapper):
        # How it works:
        # To retrieve a revision, we need to know the offset of the revision in
        # the bundle (an unbundle object). We store this offset in the index
        # (start). The base of the delta is stored in the base field.
        #
        # To differentiate a rev in the bundle from a rev in the revlog, we
        # check revision against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.bundle = cgunpacker
        n = len(self)
        self.repotiprev = n - 1
        self.bundlerevs = set() # used by 'bundle()' revset expression
        for deltadata in cgunpacker.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata

            size = len(delta)
            start = cgunpacker.tell() - size

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                self.bundlerevs.add(self.nodemap[node])
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise error.LookupError(p, self.indexfile,
                                            _("unknown parent"))

            if deltabase not in self.nodemap:
                raise error.LookupError(deltabase, self.indexfile,
                                        _('unknown delta base'))

            baserev = self.rev(deltabase)
            # start, size, full unc. size, base (unused), link, p1, p2, node
            e = (revlog.offset_type(start, flags), size, -1, baserev, link,
                 self.rev(p1), self.rev(p2), node)
            self.index.append(e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            n += 1

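    # Illustrative sketch, not part of the original file: after __init__,
    # revisions 0..repotiprev come from the on-disk revlog, and anything
    # above repotiprev lives only in the bundle, e.g.:
    #
    #     rl = bundlerevlog(opener, indexfile, cgunpacker, linkmapper)
    #     rev = rl.rev(somenode)          # somenode is a placeholder
    #     frombundle = rev > rl.repotiprev   # True for bundle-only revs
    #
    # which is exactly the test _chunk() and revdiff() below rely on.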
    def _chunk(self, rev, df=None):
        # Warning: in case of bundle, the diff is against what we stored as
        # delta base, not against rev - 1
        # XXX: could use some caching
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # hot path for bundle
            revb = self.index[rev2][3]
            if revb == rev1:
                return self._chunk(rev2)
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return revlog.revlog.revdiff(self, rev1, rev2)

        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = self.rev(node)

        if node == nullid:
            return ""

        rawtext = None
        chain = []
        iterrev = rev
        # reconstruct the revision if it is from a changegroup
        while iterrev > self.repotiprev:
            if self._cache and self._cache[1] == iterrev:
                rawtext = self._cache[2]
                break
            chain.append(iterrev)
            iterrev = self.index[iterrev][3]
        if rawtext is None:
            rawtext = self.baserevision(iterrev)

        while chain:
            delta = self._chunk(chain.pop())
            rawtext = mdiff.patches(rawtext, [delta])

        text, validatehash = self._processflags(rawtext, self.flags(rev),
                                                'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)
        self._cache = (node, rev, rawtext)
        return text

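    # Illustrative sketch of the delta-chain walk above, assuming toy
    # "fulltexts"/"deltabase"/"deltas" mappings and an "applydelta" helper
    # instead of a real revlog. The loop collects deltas from the target
    # back to a stored base revision, then replays them oldest-first:
    #
    #     chain = []
    #     iterrev = rev
    #     while iterrev not in fulltexts:   # walk toward a stored base
    #         chain.append(iterrev)
    #         iterrev = deltabase[iterrev]
    #     text = fulltexts[iterrev]
    #     while chain:                      # apply deltas, base first
    #         text = applydelta(text, deltas[chain.pop()])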
    def baserevision(self, nodeorrev):
        # Revlog subclasses may override 'revision' method to modify format of
        # content retrieved from revlog. To use bundlerevlog with such a class
        # one needs to override 'baserevision' and make a more specific call
        # here.
        return revlog.revlog.revision(self, nodeorrev, raw=True)

    def addrevision(self, *args, **kwargs):
        raise NotImplementedError

    def addgroup(self, *args, **kwargs):
        raise NotImplementedError

    def strip(self, *args, **kwargs):
        raise NotImplementedError

    def checksize(self):
        raise NotImplementedError

class bundlechangelog(bundlerevlog, changelog.changelog):
    def __init__(self, opener, cgunpacker):
        changelog.changelog.__init__(self, opener)
        linkmapper = lambda x: x
        bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
                              linkmapper)

    def baserevision(self, nodeorrev):
        # Although changelog doesn't override 'revision' method, some extensions
        # may replace this class with another that does. Same story with
        # manifest and filelog classes.

        # This bypasses filtering on changelog.node() and rev() because we need
        # revision text of the bundle base even if it is hidden.
        oldfilter = self.filteredrevs
        try:
            self.filteredrevs = ()
            return changelog.changelog.revision(self, nodeorrev, raw=True)
        finally:
            self.filteredrevs = oldfilter

class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
    def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None,
                 dir=''):
        manifest.manifestrevlog.__init__(self, opener, tree=dir)
        bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
                              linkmapper)
        if dirlogstarts is None:
            dirlogstarts = {}
            if self.bundle.version == "03":
                dirlogstarts = _getfilestarts(self.bundle)
        self._dirlogstarts = dirlogstarts
        self._linkmapper = linkmapper

    def baserevision(self, nodeorrev):
        node = nodeorrev
        if isinstance(node, int):
            node = self.node(node)

        if node in self.fulltextcache:
            result = '%s' % self.fulltextcache[node]
        else:
            result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
        return result

    def dirlog(self, d):
        if d in self._dirlogstarts:
            self.bundle.seek(self._dirlogstarts[d])
            return bundlemanifest(
                self.opener, self.bundle, self._linkmapper,
                self._dirlogstarts, dir=d)
        return super(bundlemanifest, self).dirlog(d)

class bundlefilelog(filelog.filelog):
    def __init__(self, opener, path, cgunpacker, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        self._revlog = bundlerevlog(opener, self.indexfile,
                                    cgunpacker, linkmapper)

    def baserevision(self, nodeorrev):
        return filelog.filelog.revision(self, nodeorrev, raw=True)

class bundlepeer(localrepo.localpeer):
    def canpush(self):
        return False

class bundlephasecache(phases.phasecache):
    def __init__(self, *args, **kwargs):
        super(bundlephasecache, self).__init__(*args, **kwargs)
        if util.safehasattr(self, 'opener'):
            self.opener = vfsmod.readonlyvfs(self.opener)

    def write(self):
        raise NotImplementedError

    def _write(self, fp):
        raise NotImplementedError

    def _updateroots(self, phase, newroots, tr):
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True

def _getfilestarts(cgunpacker):
    filespos = {}
    for chunkdata in iter(cgunpacker.filelogheader, {}):
        fname = chunkdata['filename']
        filespos[fname] = cgunpacker.tell()
        for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
            pass
    return filespos

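# Illustrative note, not part of the original file: a changegroup stream
# is laid out roughly as a changelog section, a manifest section, then
# one section per file, each section being a run of delta chunks ended
# by an empty chunk. That is why the iter(..., {}) sentinel idiom above
# works: filelogheader() and deltachunk() return an empty dict at each
# section boundary. Schematically:
#
#     changelog chunks ... <empty>
#     manifest chunks ... <empty>
#     'foo' header, chunks ... <empty>
#     'bar' header, chunks ... <empty>
#     <empty>                      # end of stream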
class bundlerepository(object):
    """A repository instance that is a union of a local repo and a bundle.

    Instances represent a read-only repository composed of a local repository
    with the contents of a bundle file applied. The repository instance is
    conceptually similar to the state of a repository after an
    ``hg unbundle`` operation. However, the contents of the bundle are never
    applied to the actual base repository.

    Instances constructed directly are not usable as repository objects.
    Use instance() or makebundlerepository() to create instances.
    """
    def __init__(self, bundlepath, url, tempparent):
        self._tempparent = tempparent
        self._url = url

        self.ui.setconfig('phases', 'publish', False, 'bundlerepo')

        self.tempfile = None
        f = util.posixfile(bundlepath, "rb")
        bundle = exchange.readbundle(self.ui, f, bundlepath)

        if isinstance(bundle, bundle2.unbundle20):
            self._bundlefile = bundle
            self._cgunpacker = None

            cgpart = None
            for part in bundle.iterparts(seekable=True):
                if part.type == 'changegroup':
                    if cgpart:
                        raise NotImplementedError("can't process "
                                                  "multiple changegroups")
                    cgpart = part

                self._handlebundle2part(bundle, part)

            if not cgpart:
                raise error.Abort(_("No changegroups found"))

            # This is required to placate a later consumer, which expects
            # the payload offset to be at the beginning of the changegroup.
            # We need to do this after the iterparts() generator advances
            # because iterparts() will seek to end of payload after the
            # generator returns control to iterparts().
            cgpart.seek(0, os.SEEK_SET)

        elif isinstance(bundle, changegroup.cg1unpacker):
            if bundle.compressed():
                f = self._writetempbundle(bundle.read, '.hg10un',
                                          header='HG10UN')
                bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)

            self._bundlefile = bundle
            self._cgunpacker = bundle
        else:
            raise error.Abort(_('bundle type %s cannot be read') %
                              type(bundle))

        # dict with the mapping 'filename' -> position in the changegroup.
        self._cgfilespos = {}

        self.firstnewrev = self.changelog.repotiprev + 1
        phases.retractboundary(self, None, phases.draft,
                               [ctx.node() for ctx in self[self.firstnewrev:]])

    def _handlebundle2part(self, bundle, part):
        if part.type != 'changegroup':
            return

        cgstream = part
        version = part.params.get('version', '01')
        legalcgvers = changegroup.supportedincomingversions(self)
        if version not in legalcgvers:
            msg = _('Unsupported changegroup version: %s')
            raise error.Abort(msg % version)
        if bundle.compressed():
            cgstream = self._writetempbundle(part.read, '.cg%sun' % version)

        self._cgunpacker = changegroup.getunbundler(version, cgstream, 'UN')

    def _writetempbundle(self, readfn, suffix, header=''):
        """Write a temporary file to disk
        """
        fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
                                        suffix=suffix)
        self.tempfile = temp

        with os.fdopen(fdtemp, r'wb') as fptemp:
            fptemp.write(header)
            while True:
                chunk = readfn(2**18)
                if not chunk:
                    break
                fptemp.write(chunk)

        return self.vfs.open(self.tempfile, mode="rb")

    @localrepo.unfilteredpropertycache
    def _phasecache(self):
        return bundlephasecache(self, self._phasedefaults)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        # consume the header if it exists
        self._cgunpacker.changelogheader()
        c = bundlechangelog(self.svfs, self._cgunpacker)
        self.manstart = self._cgunpacker.tell()
        return c

-   def _constructmanifest(self):
+   @localrepo.unfilteredpropertycache
+   def manifestlog(self):
        self._cgunpacker.seek(self.manstart)
        # consume the header if it exists
        self._cgunpacker.manifestheader()
        linkmapper = self.unfiltered().changelog.rev
-       m = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
+       rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
        self.filestart = self._cgunpacker.tell()
-       return m
+
+       return manifest.manifestlog(self.svfs, self, rootstore)

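    # Illustrative note on the hunk above, which is this changeset's core
    # change: instead of returning a bare bundlemanifest from the removed
    # _constructmanifest() hook, the bundle repo now builds the root
    # manifest store itself and passes it to manifest.manifestlog(),
    # whose __init__ accepts the root store directly. A sketch of the
    # resulting wiring, assuming the hg 4.8-era manifestlog API:
    #
    #     rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
    #     mfl = manifest.manifestlog(self.svfs, self, rootstore)
    #     mfl.getstorage(b'')   # root tree storage, i.e. rootstore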
    def _consumemanifest(self):
        """Consumes the manifest portion of the bundle, setting filestart so the
        file portion can be read."""
        self._cgunpacker.seek(self.manstart)
        self._cgunpacker.manifestheader()
        for delta in self._cgunpacker.deltaiter():
            pass
        self.filestart = self._cgunpacker.tell()

    @localrepo.unfilteredpropertycache
    def manstart(self):
        self.changelog
        return self.manstart

    @localrepo.unfilteredpropertycache
    def filestart(self):
        self.manifestlog

        # If filestart was not set by self.manifestlog, that means the
        # manifestlog implementation did not consume the manifests from the
        # changegroup (ex: it might be consuming trees from a separate bundle2
        # part instead). So we need to manually consume it.
        if r'filestart' not in self.__dict__:
            self._consumemanifest()

        return self.filestart

    def url(self):
        return self._url

    def file(self, f):
        if not self._cgfilespos:
            self._cgunpacker.seek(self.filestart)
            self._cgfilespos = _getfilestarts(self._cgunpacker)

        if f in self._cgfilespos:
            self._cgunpacker.seek(self._cgfilespos[f])
            linkmapper = self.unfiltered().changelog.rev
            return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
        else:
            return super(bundlerepository, self).file(f)

    def close(self):
        """Close assigned bundle file immediately."""
        self._bundlefile.close()
        if self.tempfile is not None:
            self.vfs.unlink(self.tempfile)
        if self._tempparent:
            shutil.rmtree(self._tempparent, True)

    def cancopy(self):
        return False

    def peer(self):
        return bundlepeer(self)

    def getcwd(self):
        return pycompat.getcwd() # always outside the repo

    # Check if parents exist in localrepo before setting
    def setparents(self, p1, p2=nullid):
        p1rev = self.changelog.rev(p1)
        p2rev = self.changelog.rev(p2)
        msg = _("setting parent to node %s that only exists in the bundle\n")
        if self.changelog.repotiprev < p1rev:
            self.ui.warn(msg % nodemod.hex(p1))
        if self.changelog.repotiprev < p2rev:
            self.ui.warn(msg % nodemod.hex(p2))
        return super(bundlerepository, self).setparents(p1, p2)

def instance(ui, path, create, intents=None, createopts=None):
    if create:
        raise error.Abort(_('cannot create new bundle repository'))
    # internal config: bundle.mainreporoot
    parentpath = ui.config("bundle", "mainreporoot")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(pycompat.getcwd())
        if parentpath is None:
            parentpath = ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = pycompat.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    u = util.url(path)
    path = u.localpath()
    if u.scheme == 'bundle':
        s = path.split("+", 1)
        if len(s) == 1:
            repopath, bundlename = parentpath, s[0]
        else:
            repopath, bundlename = s
    else:
        repopath, bundlename = parentpath, path

    return makebundlerepository(ui, repopath, bundlename)

def makebundlerepository(ui, repopath, bundlepath):
    """Make a bundle repository object based on repo and bundle paths."""
    if repopath:
        url = 'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
    else:
        url = 'bundle:%s' % bundlepath

    # Because we can't make any guarantees about the type of the base
    # repository, we can't have a static class representing the bundle
    # repository. We also can't make any guarantees about how to even
    # call the base repository's constructor!
    #
    # So, our strategy is to go through ``localrepo.instance()`` to construct
    # a repo instance. Then, we dynamically create a new type derived from
    # both it and our ``bundlerepository`` class which overrides some
    # functionality. We then change the type of the constructed repository
    # to this new type and initialize the bundle-specific bits of it.

    try:
        parentrepo = localrepo.instance(ui, repopath, create=False)
        tempparent = None
    except error.RepoError:
        tempparent = pycompat.mkdtemp()
        try:
            parentrepo = localrepo.instance(ui, tempparent, create=True)
        except Exception:
            shutil.rmtree(tempparent)
            raise

    class derivedbundlerepository(bundlerepository, parentrepo.__class__):
        pass

    repo = parentrepo
    repo.__class__ = derivedbundlerepository
    bundlerepository.__init__(repo, bundlepath, url, tempparent)

    return repo

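# Illustrative standalone sketch of the class-swapping trick used above
# (toy classes, not the real repo types). Reassigning __class__ grafts
# bundle behaviour onto an already-constructed object whose concrete
# type was not known in advance:
#
#     class Base(object):
#         def greet(self):
#             return 'base'
#
#     class Mixin(object):
#         def greet(self):
#             return 'mixin'
#
#     obj = Base()
#     class Derived(Mixin, obj.__class__):
#         pass
#     obj.__class__ = Derived
#     obj.greet()   # -> 'mixin', via the MRO Derived -> Mixin -> Base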
class bundletransactionmanager(object):
    def transaction(self):
        return None

    def close(self):
        raise NotImplementedError

    def release(self):
        raise NotImplementedError

def getremotechanges(ui, repo, peer, onlyheads=None, bundlename=None,
                     force=False):
    '''obtains a bundle of changes incoming from peer

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming
    changesets; it is a bundlerepo for the obtained bundle when the
    original "peer" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing
    the changes; it closes both the original "peer" and the one returned
    here.
    '''
    tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads,
                                       force=force)
    common, incoming, rheads = tmp
    if not incoming:
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return repo, [], peer.close

    commonset = set(common)
    rheads = [x for x in rheads if x not in commonset]

    bundle = None
    bundlerepo = None
    localrepo = peer.local()
    if bundlename or not localrepo:
        # create a bundle (uncompressed if peer repo is not local)

        # developer config: devel.legacy.exchange
        legexc = ui.configlist('devel', 'legacy.exchange')
        forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
        canbundle2 = (not forcebundle1
                      and peer.capable('getbundle')
                      and peer.capable('bundle2'))
        if canbundle2:
            with peer.commandexecutor() as e:
                b2 = e.callcommand('getbundle', {
                    'source': 'incoming',
                    'common': common,
                    'heads': rheads,
                    'bundlecaps': exchange.caps20to10(repo, role='client'),
                    'cg': True,
                }).result()

                fname = bundle = changegroup.writechunks(ui,
                                                         b2._forwardchunks(),
                                                         bundlename)
        else:
            if peer.capable('getbundle'):
                with peer.commandexecutor() as e:
                    cg = e.callcommand('getbundle', {
                        'source': 'incoming',
                        'common': common,
                        'heads': rheads,
                    }).result()
            elif onlyheads is None and not peer.capable('changegroupsubset'):
                # compat with older servers when pulling all remote heads

                with peer.commandexecutor() as e:
                    cg = e.callcommand('changegroup', {
                        'nodes': incoming,
                        'source': 'incoming',
                    }).result()

                rheads = None
            else:
                with peer.commandexecutor() as e:
                    cg = e.callcommand('changegroupsubset', {
                        'bases': incoming,
                        'heads': rheads,
                        'source': 'incoming',
                    }).result()

            if localrepo:
                bundletype = "HG10BZ"
            else:
                bundletype = "HG10UN"
            fname = bundle = bundle2.writebundle(ui, cg, bundlename,
                                                 bundletype)
        # keep written bundle?
        if bundlename:
            bundle = None
        if not localrepo:
            # use the created uncompressed bundlerepo
            localrepo = bundlerepo = makebundlerepository(repo.baseui,
                                                          repo.root,
                                                          fname)

            # this repo contains local and peer now, so filter out local again
            common = repo.heads()
    if localrepo:
        # Part of common may be remotely filtered
        # So use an unfiltered version
        # The discovery process probably needs cleanup to avoid that
        localrepo = localrepo.unfiltered()

    csets = localrepo.changelog.findmissing(common, rheads)

    if bundlerepo:
        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]

        with peer.commandexecutor() as e:
            remotephases = e.callcommand('listkeys', {
                'namespace': 'phases',
            }).result()

        pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
        pullop.trmanager = bundletransactionmanager()
        exchange._pullapplyphases(pullop, remotephases)

    def cleanup():
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        peer.close()

    return (localrepo, csets, cleanup)
@@ -1,2742 +1,2737 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
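
# A hypothetical caller (illustrative, not part of this module) can use this
# to peek at a cached property without forcing it to be constructed:
#
#   cl, cached = isfilecached(repo, 'changelog')
#   if cached:
#       pass # safe to inspect cl without triggering an expensive load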

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
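
# For example, a repository method that must always operate on the unfiltered
# repo could be declared as follows (illustrative sketch):
#
#   @unfilteredmethod
#   def destroyed(self):
#       ... # runs with repo.unfiltered() bound as self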

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
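
# Sketch of the executor protocol from a caller's perspective (hypothetical
# usage; assumes ``peer`` is a localpeer instance):
#
#   with peer.commandexecutor() as e:
#       f = e.callcommand('lookup', {'key': b'tip'})
#       e.sendcommands()
#   node = f.result()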

@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
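
# Sketch of how an extension registers a feature setup function (hypothetical
# extension code; the requirement name is made up):
#
#   def featuresetup(ui, supported):
#       # advertise that this extension can open repos with this requirement
#       supported.add(b'exp-myfeature')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)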

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    derives a type suitable for representing that repository and returns an
    instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we will
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    return localrepository(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        intents=intents)
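
# Illustrative call (most callers go through ``hg.repository()`` instead;
# assumes ``from mercurial import ui as uimod`` and a hypothetical path):
#
#   repo = makelocalrepository(uimod.ui.load(), b'/path/to/repo')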

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

def resolvestorevfsoptions(ui, requirements):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements))

    return options

def resolverevlogstorevfsoptions(ui, requirements):
    """Resolve opener options specific to revlogs."""

    options = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    return options
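
# For a typical modern repository whose requirements include ``revlogv1``,
# ``generaldelta`` and ``sparserevlog``, the resolved options would contain
# entries along these lines (illustrative values only):
#
#   {b'revlogv1': True, b'generaldelta': True, b'sparse-revlog': True,
#    b'lazydeltabase': True, b'with-sparse-read': False, ...}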

@interfaceutil.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
958
958
959 def peer(self):
959 def peer(self):
960 return localpeer(self) # not cached to avoid reference cycle
960 return localpeer(self) # not cached to avoid reference cycle
961
961
962 def unfiltered(self):
962 def unfiltered(self):
963 """Return unfiltered version of the repository
963 """Return unfiltered version of the repository
964
964
965 Intended to be overwritten by filtered repo."""
965 Intended to be overwritten by filtered repo."""
966 return self
966 return self
967
967
968 def filtered(self, name, visibilityexceptions=None):
968 def filtered(self, name, visibilityexceptions=None):
969 """Return a filtered version of a repository"""
969 """Return a filtered version of a repository"""
970 cls = repoview.newtype(self.unfiltered().__class__)
970 cls = repoview.newtype(self.unfiltered().__class__)
971 return cls(self, name, visibilityexceptions)
971 return cls(self, name, visibilityexceptions)
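
    # Illustrative usage sketch (not part of the original source): a filtered
    # view shares storage with the unfiltered repository but hides some
    # revisions; names such as 'visible' come from the repoview module.
    #
    #     visible = repo.filtered('visible')
    #     assert visible.unfiltered() is repo.unfiltered()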

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed with the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

-    def _constructmanifest(self):
-        # This is a temporary function while we migrate from manifest to
-        # manifestlog. It allows bundlerepo and unionrepo to intercept the
-        # manifest creation.
-        return manifest.manifestrevlog(self.svfs)
-
    @storecache('00manifest.i')
    def manifestlog(self):
-        return manifest.manifestlog(self.svfs, self)
+        rootstore = manifest.manifestrevlog(self.svfs)
+        return manifest.manifestlog(self.svfs, self, rootstore)
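
    # Illustrative sketch (not part of the original source): with the change
    # above, the caller rather than manifestlog.__init__ constructs the root
    # manifest store, so a derived repository could presumably swap in its
    # own store before building the log:
    #
    #     rootstore = myinterceptingrevlog(self.svfs)   # hypothetical class
    #     mfl = manifest.manifestlog(self.svfs, self, rootstore)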

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
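
    # Illustrative usage sketch (not part of the original source): the
    # accepted key types mirror the branches above.
    #
    #     wctx = repo[None]       # working directory context
    #     ctx = repo['tip']       # symbol, resolved through changectx
    #     ctxs = repo[0:10]       # list of changectx, filtered revs skipped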

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
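
    # Illustrative usage sketch (not part of the original source): the
    # %-escaping is handled by revsetlang.formatspec, e.g.
    #
    #     for rev in repo.revs('ancestors(%d) and not public()', 42):
    #         ...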

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
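
    # Illustrative usage sketch (not part of the original source):
    # ``localalias`` lets a caller pin an alias regardless of user config;
    # 'mine' and 'alice' below are hypothetical names.
    #
    #     revs = repo.anyrevs(['heads(mine())'], user=True,
    #                         localalias={'mine': 'author(alice)'})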

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
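
    # Illustrative usage sketch (not part of the original source): an
    # extension firing a hypothetical custom hook it registered itself:
    #
    #     repo.hook('myext-update', throw=False, node=hex(newnode))
    #
    # ('myext-update' and ``newnode`` are placeholders, not real names.)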

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
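
    # Illustrative usage sketch (not part of the original source);
    # 'release-1.0' is a hypothetical tag name:
    #
    #     node = repo.tags().get('release-1.0')
    #     kind = repo.tagtype('release-1.0')   # 'global', 'local', or None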

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
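
    # Illustrative usage sketch (not part of the original source): with
    # ignoremissing=True the KeyError is swallowed and the method implicitly
    # returns None.
    #
    #     node = repo.branchtip('default')
    #     maybe = repo.branchtip('no-such-branch', ignoremissing=True)  # None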

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it on its own
                # since it requires access to the parents' manifests. Preserve
                # them only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
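
    # Illustrative sketch (not part of the original source): the sections
    # read here come from the user's hgrc; the pattern and command below are
    # hypothetical.
    #
    #     [encode]
    #     **.txt = unix2dos
    #
    # Each such item becomes a (matcher, filterfn, params) triple above.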

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
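
    # Illustrative usage sketch (not part of the original source): ``flags``
    # is the manifest flag string, so '' writes a regular file, 'x' an
    # executable one, and 'l' a symlink whose target is ``data``. The file
    # names below are hypothetical.
    #
    #     repo.wwrite('hello.txt', b'hi\n', '')
    #     repo.wwrite('run.sh', b'#!/bin/sh\n', 'x')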

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
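
    # Illustrative usage sketch (not part of the original source): callers
    # are expected to hold the store lock around a transaction, roughly:
    #
    #     with repo.lock():
    #         with repo.transaction('my-operation') as tr:  # hypothetical desc
    #             ...  # mutate the store
    #
    # A nested call returns tr.nest(), so inner operations join the outer
    # transaction instead of opening a second journal.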
1657
1652
1658 def _journalfiles(self):
1653 def _journalfiles(self):
1659 return ((self.svfs, 'journal'),
1654 return ((self.svfs, 'journal'),
1660 (self.vfs, 'journal.dirstate'),
1655 (self.vfs, 'journal.dirstate'),
1661 (self.vfs, 'journal.branch'),
1656 (self.vfs, 'journal.branch'),
1662 (self.vfs, 'journal.desc'),
1657 (self.vfs, 'journal.desc'),
1663 (self.vfs, 'journal.bookmarks'),
1658 (self.vfs, 'journal.bookmarks'),
1664 (self.svfs, 'journal.phaseroots'))
1659 (self.svfs, 'journal.phaseroots'))
1665
1660
1666 def undofiles(self):
1661 def undofiles(self):
1667 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1662 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1668
1663
1669 @unfilteredmethod
1664 @unfilteredmethod
1670 def _writejournal(self, desc):
1665 def _writejournal(self, desc):
1671 self.dirstate.savebackup(None, 'journal.dirstate')
1666 self.dirstate.savebackup(None, 'journal.dirstate')
1672 narrowspec.savebackup(self, 'journal.narrowspec')
1667 narrowspec.savebackup(self, 'journal.narrowspec')
1673 self.vfs.write("journal.branch",
1668 self.vfs.write("journal.branch",
1674 encoding.fromlocal(self.dirstate.branch()))
1669 encoding.fromlocal(self.dirstate.branch()))
1675 self.vfs.write("journal.desc",
1670 self.vfs.write("journal.desc",
1676 "%d\n%s\n" % (len(self), desc))
1671 "%d\n%s\n" % (len(self), desc))
1677 self.vfs.write("journal.bookmarks",
1672 self.vfs.write("journal.bookmarks",
1678 self.vfs.tryread("bookmarks"))
1673 self.vfs.tryread("bookmarks"))
1679 self.svfs.write("journal.phaseroots",
1674 self.svfs.write("journal.phaseroots",
1680 self.svfs.tryread("phaseroots"))
1675 self.svfs.tryread("phaseroots"))
1681
1676
1682 def recover(self):
1677 def recover(self):
1683 with self.lock():
1678 with self.lock():
1684 if self.svfs.exists("journal"):
1679 if self.svfs.exists("journal"):
1685 self.ui.status(_("rolling back interrupted transaction\n"))
1680 self.ui.status(_("rolling back interrupted transaction\n"))
1686 vfsmap = {'': self.svfs,
1681 vfsmap = {'': self.svfs,
1687 'plain': self.vfs,}
1682 'plain': self.vfs,}
1688 transaction.rollback(self.svfs, vfsmap, "journal",
1683 transaction.rollback(self.svfs, vfsmap, "journal",
1689 self.ui.warn,
1684 self.ui.warn,
1690 checkambigfiles=_cachedfiles)
1685 checkambigfiles=_cachedfiles)
1691 self.invalidate()
1686 self.invalidate()
1692 return True
1687 return True
1693 else:
1688 else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

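    # Editor's sketch (not part of the original module): the 'undo.desc'
    # file read by _rollback above is a small plain-text file. For a
    # repository that held 42 revisions before a 'commit' transaction, its
    # contents would look roughly like:
    #
    #   42
    #   commit
    #
    # Line one is the pre-transaction changelog length, line two the
    # transaction description, and an optional third line carries extra
    # detail for the verbose rollback message.
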
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

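    # Editor's sketch (hypothetical extension code, not part of this module):
    # an extension that wants its own cache warmed after each transaction
    # could wrap the factory above, e.g.:
    #
    #   from mercurial import extensions, localrepo
    #
    #   def _wrapped(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def myupdater(tr):
    #           updater(tr)
    #           # warm an extension-specific cache here
    #       return myupdater
    #
    #   def uisetup(ui):
    #       extensions.wrapfunction(localrepo.localrepository,
    #                               '_buildcacheupdater', _wrapped)
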
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

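    # Editor's sketch (illustrative, not part of the original module):
    # besides the per-transaction callback built by _buildcacheupdater,
    # callers can force-warm everything, roughly what a maintenance command
    # such as 'hg debugupdatecaches' does:
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
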
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

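    # Editor's sketch (illustrative, not part of the original module):
    # _afterlock defers work until every lock is released, e.g.:
    #
    #   def notify():
    #       repo.ui.status(b'all locks released\n')
    #   repo._afterlock(notify)
    #
    # If no lock is currently held, the for/else above fires the callback
    # immediately.
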
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

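    # Editor's sketch (illustrative, not part of the original module): the
    # documented ordering -- wlock before lock -- from a caller's side:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           with repo.transaction(b'my-change') as tr:
    #               pass  # mutate store and working copy under the locks
    #
    # Acquiring them the other way round triggers the develwarn above when
    # devel.check-locks is enabled.
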
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

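    # Editor's sketch (illustrative, not part of the original module): for a
    # rename recorded by _filecommit above, the filelog revision of 'bar'
    # copied from 'foo' carries metadata roughly like:
    #
    #   meta = {"copy": "foo",
    #           "copyrev": "a1b2c3..."}  # hex node of foo's source revision
    #
    # with fparent1 set to nullid so readers know to look up the copy data.
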
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

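    # Editor's sketch (illustrative, not part of the original module): a
    # minimal programmatic commit through the method above:
    #
    #   node = repo.commit(text=b'fix frobnication',
    #                      user=b'Jane Doe <jane@example.com>')
    #   if node is None:
    #       repo.ui.status(b'nothing changed\n')
    #
    # commit() returns the new changeset node, or None when the commit would
    # be empty and ui.allowemptycommit is off.
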
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

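    # Editor's sketch (hypothetical usage, not part of the original module):
    # commitctx() also backs in-memory commits built without a working copy,
    # e.g. via context.memctx (argument details are assumptions here):
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, memctx, path, b'new contents\n')
    #
    #   mctx = context.memctx(repo, (repo[b'.'].node(), nullid),
    #                         b'synthesized commit', [b'a.txt'],
    #                         getfilectx, user=b'bot <bot@example.com>')
    #   node = repo.commitctx(mctx)
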
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

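    # Editor's note (illustrative, not part of the original module):
    # between() samples nodes at exponentially growing distances from 'top':
    # with i counting first-parent steps and f doubling after each hit
    # (i == f at 1, 2, 4, 8, ...), a walk of ten steps towards 'bottom'
    # records the nodes at steps 1, 2, 4 and 8 -- a logarithmic summary of
    # the path, as used by the legacy 'between' wire protocol command.
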
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of hooks that are called with a
        pushop (carrying repo, remote and outgoing) before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

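    # Editor's sketch (illustrative, not part of the original module):
    # pushkey namespaces include 'bookmarks' and 'phases'; moving a bookmark
    # through this interface might look roughly like (hypothetical values):
    #
    #   old = hex(repo._bookmarks[b'stable'])
    #   ok = repo.pushkey(b'bookmarks', b'stable', old, hex(newnode))
    #
    # A 'prepushkey' hook abort makes the call return False instead of
    # raising.
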
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

2571 # used to avoid circular references so destructors work
2566 # used to avoid circular references so destructors work
2572 def aftertrans(files):
2567 def aftertrans(files):
2573 renamefiles = [tuple(t) for t in files]
2568 renamefiles = [tuple(t) for t in files]
2574 def a():
2569 def a():
2575 for vfs, src, dest in renamefiles:
2570 for vfs, src, dest in renamefiles:
2576 # if src and dest refer to a same file, vfs.rename is a no-op,
2571 # if src and dest refer to a same file, vfs.rename is a no-op,
2577 # leaving both src and dest on disk. delete dest to make sure
2572 # leaving both src and dest on disk. delete dest to make sure
2578 # the rename couldn't be such a no-op.
2573 # the rename couldn't be such a no-op.
2579 vfs.tryunlink(dest)
2574 vfs.tryunlink(dest)
2580 try:
2575 try:
2581 vfs.rename(src, dest)
2576 vfs.rename(src, dest)
2582 except OSError: # journal file does not yet exist
2577 except OSError: # journal file does not yet exist
2583 pass
2578 pass
2584 return a
2579 return a
2585
2580
2586 def undoname(fn):
2581 def undoname(fn):
2587 base, name = os.path.split(fn)
2582 base, name = os.path.split(fn)
2588 assert name.startswith('journal')
2583 assert name.startswith('journal')
2589 return os.path.join(base, name.replace('journal', 'undo', 1))
2584 return os.path.join(base, name.replace('journal', 'undo', 1))
2590
2585
2591 def instance(ui, path, create, intents=None, createopts=None):
2586 def instance(ui, path, create, intents=None, createopts=None):
2592 localpath = util.urllocalpath(path)
2587 localpath = util.urllocalpath(path)
2593 if create:
2588 if create:
2594 createrepository(ui, localpath, createopts=createopts)
2589 createrepository(ui, localpath, createopts=createopts)
2595
2590
2596 return makelocalrepository(ui, localpath, intents=intents)
2591 return makelocalrepository(ui, localpath, intents=intents)
2597
2592
2598 def islocal(path):
2593 def islocal(path):
2599 return True
2594 return True
2600
2595
2601 def newreporequirements(ui, createopts=None):
2596 def newreporequirements(ui, createopts=None):
2602 """Determine the set of requirements for a new local repository.
2597 """Determine the set of requirements for a new local repository.
2603
2598
2604 Extensions can wrap this function to specify custom requirements for
2599 Extensions can wrap this function to specify custom requirements for
2605 new repositories.
2600 new repositories.
2606 """
2601 """
2607 createopts = createopts or {}
2602 createopts = createopts or {}
2608
2603
2609 requirements = {'revlogv1'}
2604 requirements = {'revlogv1'}
2610 if ui.configbool('format', 'usestore'):
2605 if ui.configbool('format', 'usestore'):
2611 requirements.add('store')
2606 requirements.add('store')
2612 if ui.configbool('format', 'usefncache'):
2607 if ui.configbool('format', 'usefncache'):
2613 requirements.add('fncache')
2608 requirements.add('fncache')
2614 if ui.configbool('format', 'dotencode'):
2609 if ui.configbool('format', 'dotencode'):
2615 requirements.add('dotencode')
2610 requirements.add('dotencode')
2616
2611
2617 compengine = ui.config('experimental', 'format.compression')
2612 compengine = ui.config('experimental', 'format.compression')
2618 if compengine not in util.compengines:
2613 if compengine not in util.compengines:
2619 raise error.Abort(_('compression engine %s defined by '
2614 raise error.Abort(_('compression engine %s defined by '
2620 'experimental.format.compression not available') %
2615 'experimental.format.compression not available') %
2621 compengine,
2616 compengine,
2622 hint=_('run "hg debuginstall" to list available '
2617 hint=_('run "hg debuginstall" to list available '
2623 'compression engines'))
2618 'compression engines'))
2624
2619
2625 # zlib is the historical default and doesn't need an explicit requirement.
2620 # zlib is the historical default and doesn't need an explicit requirement.
2626 if compengine != 'zlib':
2621 if compengine != 'zlib':
2627 requirements.add('exp-compression-%s' % compengine)
2622 requirements.add('exp-compression-%s' % compengine)
2628
2623
2629 if scmutil.gdinitconfig(ui):
2624 if scmutil.gdinitconfig(ui):
2630 requirements.add('generaldelta')
2625 requirements.add('generaldelta')
2631 if ui.configbool('experimental', 'treemanifest'):
2626 if ui.configbool('experimental', 'treemanifest'):
2632 requirements.add('treemanifest')
2627 requirements.add('treemanifest')
2633 # experimental config: format.sparse-revlog
2628 # experimental config: format.sparse-revlog
2634 if ui.configbool('format', 'sparse-revlog'):
2629 if ui.configbool('format', 'sparse-revlog'):
2635 requirements.add(SPARSEREVLOG_REQUIREMENT)
2630 requirements.add(SPARSEREVLOG_REQUIREMENT)
2636
2631
2637 revlogv2 = ui.config('experimental', 'revlogv2')
2632 revlogv2 = ui.config('experimental', 'revlogv2')
2638 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2633 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2639 requirements.remove('revlogv1')
2634 requirements.remove('revlogv1')
2640 # generaldelta is implied by revlogv2.
2635 # generaldelta is implied by revlogv2.
2641 requirements.discard('generaldelta')
2636 requirements.discard('generaldelta')
2642 requirements.add(REVLOGV2_REQUIREMENT)
2637 requirements.add(REVLOGV2_REQUIREMENT)
2643 # experimental config: format.internal-phase
2638 # experimental config: format.internal-phase
2644 if ui.configbool('format', 'internal-phase'):
2639 if ui.configbool('format', 'internal-phase'):
2645 requirements.add('internal-phase')
2640 requirements.add('internal-phase')
2646
2641
2647 if createopts.get('narrowfiles'):
2642 if createopts.get('narrowfiles'):
2648 requirements.add(repository.NARROW_REQUIREMENT)
2643 requirements.add(repository.NARROW_REQUIREMENT)
2649
2644
2650 return requirements
2645 return requirements
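
To make the knob-to-requirement mapping concrete, here is a toy reduction of the store/fncache/dotencode cascade, with a plain dict standing in for ui.configbool(); the real function also consults scmutil and the experimental settings above.

# Illustrative sketch only; ``cfg`` is a stand-in for ui.configbool().
def toyrequirements(cfg):
    req = {'revlogv1'}
    if cfg.get('usestore', True):
        req.add('store')
        if cfg.get('usefncache', True):
            req.add('fncache')
            if cfg.get('dotencode', True):
                req.add('dotencode')
    return req

assert toyrequirements({'dotencode': False}) == {
    'revlogv1', 'store', 'fncache'}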

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {'narrowfiles'}

    return {k: v for k, v in createopts.items() if k not in known}
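
A sketch of the wrapping pattern the docstring suggests: an extension removes the options it understands from the "unknown" dict before returning it. ``origfilter`` and the 'shallowfiles' option here are hypothetical.

# Illustrative sketch only.
def origfilter(createopts):
    known = {'narrowfiles'}
    return {k: v for k, v in createopts.items() if k not in known}

def extfilter(createopts):
    unknown = origfilter(createopts)
    unknown.pop('shallowfiles', None)   # hypothetical option we handle
    return unknown

assert extfilter({'narrowfiles': True, 'shallowfiles': True}) == {}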

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
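
The class-swap trick above works on any plain Python object with a compatible layout; a self-contained sketch:

# Illustrative sketch only.
class victim(object):
    def close(self):
        pass

class poisoned(object):
    def __getattribute__(self, item):
        if item == 'close':
            return object.__getattribute__(self, item)
        raise RuntimeError('instance used after poisoning')

    def close(self):
        pass

v = victim()
object.__setattr__(v, '__class__', poisoned)   # swap the type in place
v.close()                                      # still allowed
try:
    v.anything
except RuntimeError:
    pass                                       # every other lookup raises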
@@ -1,2017 +1,2017 b''
# manifest.py - manifest revision class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import itertools
import struct
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from . import (
    error,
    mdiff,
    policy,
    pycompat,
    repository,
    revlog,
    util,
)
from .utils import (
    interfaceutil,
)

parsers = policy.importmod(r'parsers')
propertycache = util.propertycache

def _parse(data):
    # This method does a little bit of excessive-looking
    # precondition checking. This is so that the behavior of this
    # class exactly matches its C counterpart to try and help
    # prevent surprise breakage for anyone that develops against
    # the pure version.
    if data and data[-1:] != '\n':
        raise ValueError('Manifest did not end in a newline.')
    prev = None
    for l in data.splitlines():
        if prev is not None and prev > l:
            raise ValueError('Manifest lines not in sorted order.')
        prev = l
        f, n = l.split('\0')
        if len(n) > 40:
            yield f, bin(n[:40]), n[40:]
        else:
            yield f, bin(n), ''

def _text(it):
    files = []
    lines = []
    for f, n, fl in it:
        files.append(f)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        lines.append("%s\0%s%s\n" % (f, hex(n), fl))

    _checkforbidden(files)
    return ''.join(lines)
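
Each manifest entry handled by _parse()/_text() is one line of the form <filename>, a NUL byte, 40 hex digits of node, optional flag bytes, and a newline, with lines sorted by filename. A standalone round-trip through that format:

# Illustrative sketch only.
import binascii, hashlib

node = hashlib.sha1(b'file contents').digest()       # 20-byte node
line = b'a\x00' + binascii.hexlify(node) + b'x\n'    # 'x' = executable flag
fname, rest = line.rstrip(b'\n').split(b'\x00')
assert fname == b'a'
assert binascii.unhexlify(rest[:40]) == node         # like bin(n[:40]) above
assert rest[40:] == b'x'                             # trailing flag bytes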

class lazymanifestiter(object):
    def __init__(self, lm):
        self.pos = 0
        self.lm = lm

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            self.pos += 1
            return data[0]
        self.pos += 1
        zeropos = data.find('\x00', pos)
        return data[pos:zeropos]

    __next__ = next

class lazymanifestiterentries(object):
    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            self.pos += 1
            return data
        zeropos = data.find('\x00', pos)
        hashval = unhexlify(data, self.lm.extrainfo[self.pos],
                            zeropos + 1, 40)
        flags = self.lm._getflags(data, self.pos, zeropos)
        self.pos += 1
        return (data[pos:zeropos], hashval, flags)

    __next__ = next

def unhexlify(data, extra, pos, length):
    s = bin(data[pos:pos + length])
    if extra:
        s += chr(extra & 0xff)
    return s

def _cmp(a, b):
    return (a > b) - (a < b)

class _lazymanifest(object):
    def __init__(self, data, positions=None, extrainfo=None, extradata=None):
        if positions is None:
            self.positions = self.findlines(data)
            self.extrainfo = [0] * len(self.positions)
            self.data = data
            self.extradata = []
        else:
            self.positions = positions[:]
            self.extrainfo = extrainfo[:]
            self.extradata = extradata[:]
            self.data = data

    def findlines(self, data):
        if not data:
            return []
        pos = data.find("\n")
        if pos == -1 or data[-1:] != '\n':
            raise ValueError("Manifest did not end in a newline.")
        positions = [0]
        prev = data[:data.find('\x00')]
        while pos < len(data) - 1 and pos != -1:
            positions.append(pos + 1)
            nexts = data[pos + 1:data.find('\x00', pos + 1)]
            if nexts < prev:
                raise ValueError("Manifest lines not in sorted order.")
            prev = nexts
            pos = data.find("\n", pos + 1)
        return positions

    def _get(self, index):
        # get the position encoded in pos:
        #   positive number is an index in 'data'
        #   negative number is in extrapieces
        pos = self.positions[index]
        if pos >= 0:
            return self.data, pos
        return self.extradata[-pos - 1], -1

    def _getkey(self, pos):
        if pos >= 0:
            return self.data[pos:self.data.find('\x00', pos + 1)]
        return self.extradata[-pos - 1][0]
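
The encoding that _get()/_getkey() decode, shown standalone: a non-negative position is a byte offset into the original manifest text, while a position p < 0 addresses entry -p - 1 of the ``extradata`` list of pending (key, node, flags) tuples.

# Illustrative sketch only.
data = b'a\x00' + b'0' * 40 + b'\n' + b'b\x00' + b'1' * 40 + b'\n'
positions = [0, 43, -1]                   # two parsed lines, one extra entry
extradata = [(b'c', b'\x22' * 20, b'')]   # (key, node, flags)

def getkey(pos):
    if pos >= 0:
        return data[pos:data.find(b'\x00', pos + 1)]
    return extradata[-pos - 1][0]

assert [getkey(p) for p in positions] == [b'a', b'b', b'c']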

    def bsearch(self, key):
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return midpoint
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return -1

    def bsearch2(self, key):
        # same as the above, but will always return the position
        # done for performance reasons
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return (midpoint, True)
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return (first, False)

    def __contains__(self, key):
        return self.bsearch(key) != -1

    def _getflags(self, data, needle, pos):
        start = pos + 41
        end = data.find("\n", start)
        if end == -1:
            end = len(data) - 1
        if start == end:
            return ''
        return self.data[start:end]

    def __getitem__(self, key):
        if not isinstance(key, bytes):
            raise TypeError("getitem: manifest keys must be bytes.")
        needle = self.bsearch(key)
        if needle == -1:
            raise KeyError
        data, pos = self._get(needle)
        if pos == -1:
            return (data[1], data[2])
        zeropos = data.find('\x00', pos)
        assert 0 <= needle <= len(self.positions)
        assert len(self.extrainfo) == len(self.positions)
        hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
        flags = self._getflags(data, needle, zeropos)
        return (hashval, flags)

    def __delitem__(self, key):
        needle, found = self.bsearch2(key)
        if not found:
            raise KeyError
        cur = self.positions[needle]
        self.positions = self.positions[:needle] + self.positions[needle + 1:]
        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
        if cur >= 0:
            self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]

    def __setitem__(self, key, value):
        if not isinstance(key, bytes):
            raise TypeError("setitem: manifest keys must be a byte string.")
        if not isinstance(value, tuple) or len(value) != 2:
            raise TypeError("Manifest values must be a tuple of (node, flags).")
        hashval = value[0]
        if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
            raise TypeError("node must be a 20-byte byte string")
        flags = value[1]
        if len(hashval) == 22:
            hashval = hashval[:-1]
        if not isinstance(flags, bytes) or len(flags) > 1:
            raise TypeError("flags must be a 0 or 1 byte string, got %r" % flags)
        needle, found = self.bsearch2(key)
        if found:
            # put the item
            pos = self.positions[needle]
            if pos < 0:
                self.extradata[-pos - 1] = (key, hashval, value[1])
            else:
                # just don't bother
                self.extradata.append((key, hashval, value[1]))
                self.positions[needle] = -len(self.extradata)
        else:
            # not found, put it in with extra positions
            self.extradata.append((key, hashval, value[1]))
            self.positions = (self.positions[:needle] + [-len(self.extradata)]
                              + self.positions[needle:])
            self.extrainfo = (self.extrainfo[:needle] + [0] +
                              self.extrainfo[needle:])

    def copy(self):
        # XXX call _compact like in C?
        return _lazymanifest(self.data, self.positions, self.extrainfo,
                             self.extradata)

    def _compact(self):
        # hopefully not called TOO often
        if len(self.extradata) == 0:
            return
        l = []
        last_cut = 0
        i = 0
        offset = 0
        self.extrainfo = [0] * len(self.positions)
        while i < len(self.positions):
            if self.positions[i] >= 0:
                cur = self.positions[i]
                last_cut = cur
                while True:
                    self.positions[i] = offset
                    i += 1
                    if i == len(self.positions) or self.positions[i] < 0:
                        break
                    offset += self.positions[i] - cur
                    cur = self.positions[i]
                end_cut = self.data.find('\n', cur)
                if end_cut != -1:
                    end_cut += 1
                offset += end_cut - cur
                l.append(self.data[last_cut:end_cut])
            else:
                while i < len(self.positions) and self.positions[i] < 0:
                    cur = self.positions[i]
                    t = self.extradata[-cur - 1]
                    l.append(self._pack(t))
                    self.positions[i] = offset
                    if len(t[1]) > 20:
                        self.extrainfo[i] = ord(t[1][21])
                    offset += len(l[-1])
                    i += 1
        self.data = ''.join(l)
        self.extradata = []

    def _pack(self, d):
        return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'

    def text(self):
        self._compact()
        return self.data

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        # XXX think whether efficiency matters here
        diff = {}

        for fn, e1, flags in self.iterentries():
            if fn not in m2:
                diff[fn] = (e1, flags), (None, '')
            else:
                e2 = m2[fn]
                if (e1, flags) != e2:
                    diff[fn] = (e1, flags), e2
                elif clean:
                    diff[fn] = None

        for fn, e2, flags in m2.iterentries():
            if fn not in self:
                diff[fn] = (None, ''), (e2, flags)

        return diff

    def iterentries(self):
        return lazymanifestiterentries(self)

    def iterkeys(self):
        return lazymanifestiter(self)

    def __iter__(self):
        return lazymanifestiter(self)

    def __len__(self):
        return len(self.positions)

    def filtercopy(self, filterfn):
        # XXX should be optimized
        c = _lazymanifest('')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c

try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass

@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
    def __init__(self, data=''):
        self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        return self._lm[key][0]

    def find(self, key):
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __nonzero__(self):
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    __bool__ = __nonzero__

    def __setitem__(self, key, node):
        self._lm[key] = node, self.flags(key, '')

    def __contains__(self, key):
        if key is None:
            return False
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match:
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)
        diff = self.diff(m2)
        files = set(filepath
                    for filepath, hashflags in diff.iteritems()
                    if hashflags[1][0] is None)
        return files

    @propertycache
    def _dirs(self):
        return util.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        return (len(files) < 100 and (match.isexact() or
            (match.prefix() and all(fn in self for fn in files))))

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match:
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        return self._lm.diff(m2._lm, clean)
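
An illustration of the return shape documented above, with plain dicts standing in for manifests:

# Illustrative sketch only; real manifests are not plain dicts.
m1 = {b'a': (b'\x11' * 20, b''), b'b': (b'\x22' * 20, b'x')}
m2 = {b'a': (b'\x11' * 20, b''), b'c': (b'\x33' * 20, b'')}

d = {}
for fn in set(m1) | set(m2):
    e1 = m1.get(fn, (None, b''))
    e2 = m2.get(fn, (None, b''))
    if e1 != e2:
        d[fn] = (e1, e2)

assert sorted(d) == [b'b', b'c']   # b'a' is unchanged, so omitted
assert d[b'b'][1] == (None, b'')   # missing on the m2 side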

    def setflag(self, key, flag):
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key, default=''):
        try:
            return self._lm[key][1]
        except KeyError:
            return default

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def items(self):
        return (x[:2] for x in self._lm.iterentries())

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self):
        # most likely uses native version
        return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < 1000:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                                _("failed to remove %s from manifest") % f)
                    l = ""
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext))

        return arraytext, deltatext
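
The small-change path above accumulates [start, end, replacement] byte-range edits; applying such a delta is a single splice pass over the base text. A standalone sketch of that application step:

# Illustrative sketch only.
base = bytearray(b'a\x001111\nb\x002222\nc\x003333\n')
delta = [[7, 14, b'b\x009999\n']]   # replace the 7-byte line for b'b'

out, pos = bytearray(), 0
for start, end, content in delta:
    out += base[pos:start]          # copy untouched bytes
    out += content                  # splice in the replacement
    pos = end
out += base[pos:]
assert bytes(out) == b'a\x001111\nb\x009999\nc\x003333\n'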

def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found, m[start:end] is the line containing
    that string. If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer, a memoryview or a byte string.
    s is a byte string'''
    def advance(i, c):
        while i < lenm and m[i:i + 1] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        while start > 0 and m[start - 1:start] != '\n':
            start -= 1
        end = advance(start, '\0')
        if bytes(m[start:end]) < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)
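
A functionally equivalent sketch of _msearch() built on the standard bisect module instead of in-place scanning; it honors the same (start, end) / insertion-point contract, at the cost of O(n) preprocessing:

# Illustrative sketch only; O(n) preprocessing instead of in-place bisect.
import bisect

def msearch_sketch(m, s):
    lines = m.split(b'\n')[:-1]
    offsets, off = [], 0
    for line in lines:
        offsets.append(off)
        off += len(line) + 1
    keys = [line.split(b'\x00')[0] for line in lines]
    i = bisect.bisect_left(keys, s)
    if i < len(keys) and keys[i] == s:
        return offsets[i], offsets[i] + len(lines[i]) + 1
    start = offsets[i] if i < len(offsets) else len(m)
    return start, start

blob = b'a\x00n\nc\x00n\n'
assert msearch_sketch(blob, b'a') == (0, 4)   # the whole b'a' line
assert msearch_sketch(blob, b'b') == (4, 4)   # insertion point before b'c'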

def _checkforbidden(l):
    """Check filenames for illegal characters."""
    for f in l:
        if '\n' in f or '\r' in f:
            raise error.RevlogError(
                _("'\\n' and '\\r' disallowed in filenames: %r")
                % pycompat.bytestr(f))


# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = bytearray()

    for start, end, content in x:
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += bytearray(content)

        currentposition = end

    newaddlist += addlist[currentposition:]

    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, newaddlist
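
The deltatext built above is the binary patch format revlog/mdiff consume: a big-endian (start, end, length) header followed by ``length`` bytes of replacement content, repeated. A decoding sketch:

# Illustrative sketch only.
import struct

def iterchunks(deltatext):
    pos = 0
    while pos < len(deltatext):
        start, end, length = struct.unpack(">lll", deltatext[pos:pos + 12])
        yield start, end, deltatext[pos + 12:pos + 12 + length]
        pos += 12 + length

content = b'b\x009999\n'
deltatext = struct.pack(">lll", 7, 14, len(content)) + content
assert list(iterchunks(deltatext)) == [(7, 14, content)]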

def _splittopdir(f):
    if '/' in f:
        dir, subpath = f.split('/', 1)
        return dir + '/', subpath
    else:
        return '', f

_noop = lambda s: None

class treemanifest(object):
    def __init__(self, dir='', text=''):
        self._dir = dir
        self._node = nullid
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        self._dirs = {}
        self._lazydirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:
            def readsubtree(subdir, subm):
                raise AssertionError('treemanifest constructor only accepts '
                                     'flat manifests')
            self.parse(text, readsubtree)
            self._dirty = True # Mark flat manifest dirty after parsing

    def _subpath(self, path):
        return self._dir + path

    def _loadalllazy(self):
        for k, (path, node, readsubtree) in self._lazydirs.iteritems():
            self._dirs[k] = readsubtree(path, node)
        self._lazydirs = {}

    def _loadlazy(self, d):
        path, node, readsubtree = self._lazydirs[d]
        self._dirs[d] = readsubtree(path, node)
        del self._lazydirs[d]

    def _loadchildrensetlazy(self, visit):
        if not visit:
            return None
        if visit == 'all' or visit == 'this':
            self._loadalllazy()
            return None

        todel = []
        for k in visit:
            kslash = k + '/'
            ld = self._lazydirs.get(kslash)
            if ld:
                path, node, readsubtree = ld
                self._dirs[kslash] = readsubtree(path, node)
                todel.append(kslash)
        for kslash in todel:
            del self._lazydirs[kslash]
        return visit
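
A toy version of the _lazydirs mechanism used by the loaders above: each entry is a (path, node, readsubtree) triple, and the subtree is only materialized the first time it is needed.

# Illustrative sketch only; ``readsubtree`` would normally hit a revlog.
class toytree(object):
    def __init__(self):
        self._dirs = {}
        self._lazydirs = {
            'sub/': ('sub/', b'\x11' * 20,
                     lambda path, node: {'loaded-from': node}),
        }

    def _loadlazy(self, d):
        path, node, readsubtree = self._lazydirs.pop(d)
        self._dirs[d] = readsubtree(path, node)

t = toytree()
assert 'sub/' not in t._dirs     # nothing read yet
t._loadlazy('sub/')
assert 'sub/' in t._dirs         # materialized on demand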
731
731
732 def __len__(self):
732 def __len__(self):
733 self._load()
733 self._load()
734 size = len(self._files)
734 size = len(self._files)
735 self._loadalllazy()
735 self._loadalllazy()
736 for m in self._dirs.values():
736 for m in self._dirs.values():
737 size += m.__len__()
737 size += m.__len__()
738 return size
738 return size
739
739
740 def __nonzero__(self):
740 def __nonzero__(self):
741 # Faster than "__len() != 0" since it avoids loading sub-manifests
741 # Faster than "__len() != 0" since it avoids loading sub-manifests
742 return not self._isempty()
742 return not self._isempty()
743
743
744 __bool__ = __nonzero__
744 __bool__ = __nonzero__
745
745
746 def _isempty(self):
746 def _isempty(self):
747 self._load() # for consistency; already loaded by all callers
747 self._load() # for consistency; already loaded by all callers
748 # See if we can skip loading everything.
748 # See if we can skip loading everything.
749 if self._files or (self._dirs and
749 if self._files or (self._dirs and
750 any(not m._isempty() for m in self._dirs.values())):
750 any(not m._isempty() for m in self._dirs.values())):
751 return False
751 return False
752 self._loadalllazy()
752 self._loadalllazy()
753 return (not self._dirs or
753 return (not self._dirs or
754 all(m._isempty() for m in self._dirs.values()))
754 all(m._isempty() for m in self._dirs.values()))
755
755
756 def __repr__(self):
756 def __repr__(self):
757 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
757 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
758 (self._dir, hex(self._node),
758 (self._dir, hex(self._node),
759 bool(self._loadfunc is _noop),
759 bool(self._loadfunc is _noop),
760 self._dirty, id(self)))
760 self._dirty, id(self)))
761
761
762 def dir(self):
762 def dir(self):
763 '''The directory that this tree manifest represents, including a
763 '''The directory that this tree manifest represents, including a
764 trailing '/'. Empty string for the repo root directory.'''
764 trailing '/'. Empty string for the repo root directory.'''
765 return self._dir
765 return self._dir
766
766
767 def node(self):
767 def node(self):
768 '''This node of this instance. nullid for unsaved instances. Should
768 '''This node of this instance. nullid for unsaved instances. Should
769 be updated when the instance is read or written from a revlog.
769 be updated when the instance is read or written from a revlog.
770 '''
770 '''
771 assert not self._dirty
771 assert not self._dirty
772 return self._node
772 return self._node
773
773
774 def setnode(self, node):
774 def setnode(self, node):
775 self._node = node
775 self._node = node
776 self._dirty = False
776 self._dirty = False
777
777
778 def iterentries(self):
778 def iterentries(self):
779 self._load()
779 self._load()
780 self._loadalllazy()
780 self._loadalllazy()
781 for p, n in sorted(itertools.chain(self._dirs.items(),
781 for p, n in sorted(itertools.chain(self._dirs.items(),
782 self._files.items())):
782 self._files.items())):
783 if p in self._files:
783 if p in self._files:
784 yield self._subpath(p), n, self._flags.get(p, '')
784 yield self._subpath(p), n, self._flags.get(p, '')
785 else:
785 else:
786 for x in n.iterentries():
786 for x in n.iterentries():
787 yield x
787 yield x
788
788
    def items(self):
        self._load()
        self._loadalllazy()
        for p, n in sorted(itertools.chain(self._dirs.items(),
                                           self._files.items())):
            if p in self._files:
                yield self._subpath(p), n
            else:
                for f, sn in n.iteritems():
                    yield f, sn

    iteritems = items

    def iterkeys(self):
        self._load()
        self._loadalllazy()
        for p in sorted(itertools.chain(self._dirs, self._files)):
            if p in self._files:
                yield self._subpath(p)
            else:
                for f in self._dirs[p]:
                    yield f

    def keys(self):
        return list(self.iterkeys())

    def __iter__(self):
        return self.iterkeys()

    def __contains__(self, f):
        if f is None:
            return False
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir in self._lazydirs:
                self._loadlazy(dir)

            if dir not in self._dirs:
                return False

            return self._dirs[dir].__contains__(subpath)
        else:
            return f in self._files

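    # Illustrative note (not part of the original source): _splittopdir
    # splits a path on its first component, keeping the trailing '/'
    # convention used for directory keys, e.g.:
    #
    #   _splittopdir('a/b/c') == ('a/', 'b/c')
    #   _splittopdir('a')     == ('', 'a')
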
    def get(self, f, default=None):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir in self._lazydirs:
                self._loadlazy(dir)

            if dir not in self._dirs:
                return default
            return self._dirs[dir].get(subpath, default)
        else:
            return self._files.get(f, default)

    def __getitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir in self._lazydirs:
                self._loadlazy(dir)

            return self._dirs[dir].__getitem__(subpath)
        else:
            return self._files[f]

    def flags(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir in self._lazydirs:
                self._loadlazy(dir)

            if dir not in self._dirs:
                return ''
            return self._dirs[dir].flags(subpath)
        else:
            if f in self._lazydirs or f in self._dirs:
                return ''
            return self._flags.get(f, '')

    def find(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir in self._lazydirs:
                self._loadlazy(dir)

            return self._dirs[dir].find(subpath)
        else:
            return self._files[f], self._flags.get(f, '')

    def __delitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir in self._lazydirs:
                self._loadlazy(dir)

            self._dirs[dir].__delitem__(subpath)
            # If the directory is now empty, remove it
            if self._dirs[dir]._isempty():
                del self._dirs[dir]
        else:
            del self._files[f]
            if f in self._flags:
                del self._flags[f]
        self._dirty = True

    def __setitem__(self, f, n):
        assert n is not None
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir in self._lazydirs:
                self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].__setitem__(subpath, n)
        else:
            self._files[f] = n[:21] # to match manifestdict's behavior
        self._dirty = True

    def _load(self):
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)

    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir in self._lazydirs:
                self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags
        self._dirty = True

    def copy(self):
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:
            def _copyfunc(s):
                self._load()
                # OPT: it'd be nice to not load everything here. Unfortunately
                # this makes a mess of the "dirty" state tracking if we don't.
                self._loadalllazy()
                sdirs = s._dirs
                for d, v in self._dirs.iteritems():
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)
            if self._loadfunc is _noop:
                _copyfunc(copy)
            else:
                copy._copyfunc = _copyfunc
        else:
            copy._copyfunc = self._copyfunc
        return copy

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match and not match.always():
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)

        files = set()
        def _filesnotin(t1, t2):
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            t1._loadalllazy()
            t2._loadalllazy()
            for d, m1 in t1._dirs.iteritems():
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    files.update(m1.iterkeys())

            for fn in t1._files:
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files

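    # Illustrative sketch (not part of the original source): with
    # hypothetical manifests m1 containing {'a', 'd/b'} and m2 containing
    # {'a'}, m1.filesnotin(m2) == {'d/b'}.
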
    @propertycache
    def _alldirs(self):
        return util.dirs(self)

    def dirs(self):
        return self._alldirs

    def hasdir(self, dir):
        self._load()
        topdir, subdir = _splittopdir(dir)
        if topdir:
            if topdir in self._lazydirs:
                self._loadlazy(topdir)
            if topdir in self._dirs:
                return self._dirs[topdir].hasdir(subdir)
            return False
        dirslash = dir + '/'
        return dirslash in self._dirs or dirslash in self._lazydirs

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

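    # Illustrative usage (not part of the original source; `repo`, `node`
    # and the matcher arguments are hypothetical):
    #
    #   import mercurial.match as matchmod
    #   m = repo.manifestlog[node].read()
    #   for fn in m.walk(matchmod.match(repo.root, '', ['glob:src/**'])):
    #       print(fn)
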
    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        visit = match.visitchildrenset(self._dir[:-1] or '.')
        if not visit:
            return

        # yield this dir's files and walk its submanifests
        self._load()
        visit = self._loadchildrensetlazy(visit)
        for p in sorted(list(self._dirs) + list(self._files)):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                if not visit or p[:-1] in visit:
                    for f in self._dirs[p]._walk(match):
                        yield f

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        return self._matches(match)

    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        visit = match.visitchildrenset(self._dir[:-1] or '.')
        if visit == 'all':
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            # While visitchildrenset *usually* lists only subdirs, this is
            # actually up to the matcher and may have some files in the set().
            # If visit == 'this', we should obviously look at the files in this
            # directory; if visit is a set, and fn is in it, we should inspect
            # fn (but no need to inspect things not in the set).
            if visit != 'this' and fn not in visit:
                continue
            fullp = self._subpath(fn)
            # visitchildrenset isn't perfect, we still need to call the regular
            # matcher code to further filter results.
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        visit = self._loadchildrensetlazy(visit)
        for dir, subm in self._dirs.iteritems():
            if visit and dir[:-1] not in visit:
                continue
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
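        # Illustrative sketch (not part of the original source): for a file
        # 'a' whose nodeid changed from n1 to n2 and a file 'b' present only
        # in m2 (nodeids hypothetical), the result resembles:
        #
        #   {'a': ((n1, ''), (n2, '')), 'b': ((None, ''), (n2, ''))}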
        if match and not match.always():
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            # OPT: do we need to load everything?
            t1._loadalllazy()
            t2._loadalllazy()
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    _diff(emptytree, m2)

            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result

    def unmodifiedsince(self, m2):
        return not self._dirty and not m2._dirty and self._node == m2._node

    def parse(self, text, readsubtree):
        selflazy = self._lazydirs
        subpath = self._subpath
        for f, n, fl in _parse(text):
            if fl == 't':
                f = f + '/'
                selflazy[f] = (subpath(f), n, readsubtree)
            elif '/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl

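    # Illustrative note (not part of the original source): each manifest
    # text line has the form "<path>\0<40-hex nodeid><flags>\n", where
    # flags is '', 'l' (symlink), 'x' (executable) or 't' (subtree), so
    # _parse yields e.g.:
    #
    #   "foo\0<40 hex digits>x\n"  ->  ('foo', <binary nodeid>, 'x')
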
    def text(self):
        """Get the full data of this manifest as a bytestring."""
        self._load()
        return _text(self.iterentries())

    def dirtext(self):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        lazydirs = [(d[:-1], node, 't') for
                    d, (path, node, readsubtree) in self._lazydirs.iteritems()]
        dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files + lazydirs))

    def read(self, gettext, readsubtree):
        def _load_for_read(s):
            s.parse(gettext(), readsubtree)
            s._dirty = False
        self._loadfunc = _load_for_read

    def writesubtrees(self, m1, m2, writesubtree, match):
        self._load() # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest()
        def getnode(m, d):
            ld = m._lazydirs.get(d)
            if ld:
                return ld[1]
            return m._dirs.get(d, emptytree)._node

        # we should have always loaded everything by the time we get here for
        # `self`, but possibly not in `m1` or `m2`.
        assert not self._lazydirs
        # let's skip investigating things that `match` says we do not need.
        visit = match.visitchildrenset(self._dir[:-1] or '.')
        if visit == 'this' or visit == 'all':
            visit = None
        for d, subm in self._dirs.iteritems():
            if visit and d[:-1] not in visit:
                continue
            subp1 = getnode(m1, d)
            subp2 = getnode(m2, d)
            if subp1 == nullid:
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2, match)

    def walksubtrees(self, matcher=None):
        """Returns an iterator of the subtrees of this manifest, including this
        manifest itself.

        If `matcher` is provided, it only returns subtrees that match.
        """
        if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
            return
        if not matcher or matcher(self._dir[:-1]):
            yield self

        self._load()
        # OPT: use visitchildrenset to avoid loading everything.
        self._loadalllazy()
        for d, subm in self._dirs.iteritems():
            for subtree in subm.walksubtrees(matcher=matcher):
                yield subtree

class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """
    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        self._dirty = False
        self._read = False
        self._opener = None

    def read(self):
        if self._read or self._opener is None:
            return

        try:
            with self._opener('manifestfulltextcache') as fp:
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is skipped
                while True:
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack('>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        break
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False

    def write(self):
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        with self._opener(
            'manifestfulltextcache', 'w', atomictemp=True, checkambig=True
        ) as fp:
            node = self._head.prev
            while True:
                if node.key in self._cache:
                    fp.write(node.key)
                    fp.write(struct.pack('>L', len(node.value)))
                    fp.write(node.value)
                if node is self._head:
                    break
                node = node.prev

    def __len__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__len__()

    def __contains__(self, k):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__contains__(k)

    def __iter__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__iter__()

    def __getitem__(self, k):
        if not self._read:
            self.read()
        # the cache lru order can change on read
        setdirty = self._cache.get(k) is not self._head
        value = super(manifestfulltextcache, self).__getitem__(k)
        if setdirty:
            self._dirty = True
        return value

    def __setitem__(self, k, v):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__setitem__(k, v)
        self._dirty = True

    def __delitem__(self, k):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__delitem__(k)
        self._dirty = True

    def get(self, k, default=None):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).get(k, default=default)

    def clear(self, clear_persisted_data=False):
        super(manifestfulltextcache, self).clear()
        if clear_persisted_data:
            self._dirty = True
            self.write()
        self._read = False

@interfaceutil.implementer(repository.imanifeststorage)
class manifestrevlog(object):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
                 treemanifest=False):
        """Constructs a new manifest revlog

        `indexfile` - used by extensions to have two manifests at once, like
        when transitioning between flat manifests and tree manifests.

        `treemanifest` - used to indicate this is a tree manifest revlog. Opener
        options can also be used to make this a tree manifest revlog. The opener
        option takes precedence, so if it is set to True, we ignore whatever
        value is passed in to the constructor.
        """
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        optiontreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            optiontreemanifest = opts.get('treemanifest', False)

        self._treeondisk = optiontreemanifest or treemanifest

        self._fulltextcache = manifestfulltextcache(cachesize)

        if tree:
            assert self._treeondisk, 'opts is %r' % opts

        if indexfile is None:
            indexfile = '00manifest.i'
            if tree:
                indexfile = "meta/" + tree + indexfile

        self.tree = tree

        # The dirlogcache is kept on the root manifest log
        if tree:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        self._revlog = revlog.revlog(opener, indexfile,
                                     # only root indexfile is cached
                                     checkambig=not bool(tree),
                                     mmaplargeindex=True)

        self.index = self._revlog.index
        self.version = self._revlog.version
        self._generaldelta = self._revlog._generaldelta

    def _setupmanifestcachehooks(self, repo):
        """Persist the manifestfulltextcache on lock release"""
        if not util.safehasattr(repo, '_lockref'):
            return

        self._fulltextcache._opener = repo.cachevfs
        reporef = weakref.ref(repo)
        manifestrevlogref = weakref.ref(self)

        def persistmanifestcache():
            repo = reporef()
            self = manifestrevlogref()
            if repo is None or self is None:
                return
            if repo.manifestlog.getstorage(b'') is not self:
                # there's a different manifest in play now, abort
                return
            self._fulltextcache.write()

        if repo._currentlock(repo._lockref) is not None:
            repo._afterlock(persistmanifestcache)

    @property
    def fulltextcache(self):
        return self._fulltextcache

    def clearcaches(self, clear_persisted_data=False):
        self._revlog.clearcaches()
        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
        self._dirlogcache = {self.tree: self}

    def dirlog(self, d):
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            mfrevlog = manifestrevlog(self.opener, d,
                                      self._dirlogcache,
                                      treemanifest=self._treeondisk)
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]

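    # Illustrative note (not part of the original source): for a tree
    # manifest repo, dirlog('a/') lazily creates the revlog for subtree
    # 'a/', whose index lives at "meta/a/00manifest.i" per the constructor
    # above.
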
    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in added],
                               [(x, True) for x in removed])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self._revlog.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self._revlog.addrevision(text, transaction, link, p1, p2,
                                         cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                assert readtree, "readtree must be set for treemanifest writes"
                assert match, "match must be specified for treemanifest writes"
                m1 = readtree(self.tree, p1)
                m2 = readtree(self.tree, p2)
                n = self._addtree(m, transaction, link, m1, m2, readtree,
                                  match=match)
                arraytext = None
            else:
                text = m.text()
                n = self._revlog.addrevision(text, transaction, link, p1, p2)
                arraytext = bytearray(text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n

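    # Illustrative sketch (not part of the original source): the fastdelta
    # path above merges the changed lists into one sorted stream of
    # (path, is_removal) pairs, e.g. added=['b'] and removed=['a'] yield:
    #
    #   ('a', True), ('b', False)
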
    def _addtree(self, m, transaction, link, m1, m2, readtree, match):
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self.tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(
            m2)):
            return m.node()
        def writesubtree(subm, subp1, subp2, match):
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None,
                       readtree=readtree, match=match)
        m.writesubtrees(m1, m2, writesubtree, match)
        text = m.dirtext()
        n = None
        if self.tree != '':
            # Double-check whether contents are unchanged to one parent
            if text == m1.dirtext():
                n = m1.node()
            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            n = self._revlog.addrevision(text, transaction, link, m1.node(),
                                         m2.node())

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, value):
        return self._revlog.lookup(value)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def parents(self, node):
        return self._revlog.parents(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def checksize(self):
        return self._revlog.checksize()

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def cmp(self, node, text):
        return self._revlog.cmp(node, text)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def emitrevisiondeltas(self, requests):
        return self._revlog.emitrevisiondeltas(requests)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def files(self):
        return self._revlog.files()

    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, manifestrevlog):
            raise error.ProgrammingError('expected manifestrevlog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    @property
    def opener(self):
        return self._revlog.opener

    @opener.setter
    def opener(self, value):
        self._revlog.opener = value

@interfaceutil.implementer(repository.imanifestlog)
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo, rootstore):
        usetreemanifest = False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            cachesize = opts.get('manifestcachesize', cachesize)

        self._treemanifests = usetreemanifest

        self._rootstore = rootstore
        self._rootstore._setupmanifestcachehooks(repo)
        self._narrowmatch = repo.narrowmatch()

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[''] = util.lrucachedict(cachesize)

        self._cachesize = cachesize

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get('', node)

    def get(self, tree, node, verify=True):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.

        `verify` - if True an exception will be thrown if the node is not in
                   the revlog
        """
        if node in self._dirmancache.get(tree, ()):
            return self._dirmancache[tree][node]

        if not self._narrowmatch.always():
            if not self._narrowmatch.visitdir(tree[:-1] or '.'):
                return excludeddirmanifestctx(tree, node)
        if tree:
            if self._rootstore._treeondisk:
                if verify:
                    # Side-effect is LookupError is raised if node doesn't
                    # exist.
                    self.getstorage(tree).rev(node)

                m = treemanifestctx(self, tree, node)
            else:
                raise error.Abort(
                        _("cannot ask for manifest directory '%s' in a flat "
                          "manifest") % tree)
        else:
            if verify:
                # Side-effect is LookupError is raised if node doesn't exist.
                self._rootstore.rev(node)

            if self._treemanifests:
                m = treemanifestctx(self, '', node)
            else:
                m = manifestctx(self, node)

        if node != nullid:
            mancache = self._dirmancache.get(tree)
            if not mancache:
                mancache = util.lrucachedict(self._cachesize)
                self._dirmancache[tree] = mancache
            mancache[node] = m
        return m

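    # Illustrative usage (not part of the original source), assuming `repo`
    # is an open repository and `node` a known manifest nodeid:
    #
    #   mctx = repo.manifestlog.get('', node)  # context for the root tree
    #   files = mctx.read()                    # the manifest contents
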
    def getstorage(self, tree):
        return self._rootstore.dirlog(tree)

    def clearcaches(self, clear_persisted_data=False):
        self._dirmancache.clear()
        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)

    def rev(self, node):
        return self._rootstore.rev(node)

@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict()

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def new(self):
        return memmanifestctx(self._manifestlog)

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    def read(self):
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        return self._storage().add(self._manifestdict, transaction, link,
                                   p1, p2, added, removed, match=match)

@interfaceutil.implementer(repository.imanifestrevisionstored)
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = store.parents(node)
        #rev = store.rev(node)
        #self.linkrev = store.linkrev(rev)

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def node(self):
        return self._node

    def new(self):
        return memmanifestctx(self._manifestlog)

    def copy(self):
        memmf = memmanifestctx(self._manifestlog)
        memmf._manifestdict = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def read(self):
        if self._data is None:
            if self._node == nullid:
                self._data = manifestdict()
            else:
                store = self._storage()
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

1767 def readfast(self, shallow=False):
1767 def readfast(self, shallow=False):
1768 '''Calls either readdelta or read, based on which would be less work.
1768 '''Calls either readdelta or read, based on which would be less work.
1769 readdelta is called if the delta is against the p1, and therefore can be
1769 readdelta is called if the delta is against the p1, and therefore can be
1770 read quickly.
1770 read quickly.
1771
1771
1772 If `shallow` is True, nothing changes since this is a flat manifest.
1772 If `shallow` is True, nothing changes since this is a flat manifest.
1773 '''
1773 '''
1774 store = self._storage()
1774 store = self._storage()
1775 r = store.rev(self._node)
1775 r = store.rev(self._node)
1776 deltaparent = store.deltaparent(r)
1776 deltaparent = store.deltaparent(r)
1777 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
1777 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
1778 return self.readdelta()
1778 return self.readdelta()
1779 return self.read()
1779 return self.read()
1780
1780
1781 def readdelta(self, shallow=False):
1781 def readdelta(self, shallow=False):
1782 '''Returns a manifest containing just the entries that are present
1782 '''Returns a manifest containing just the entries that are present
1783 in this manifest, but not in its p1 manifest. This is efficient to read
1783 in this manifest, but not in its p1 manifest. This is efficient to read
1784 if the revlog delta is already p1.
1784 if the revlog delta is already p1.
1785
1785
1786 Changing the value of `shallow` has no effect on flat manifests.
1786 Changing the value of `shallow` has no effect on flat manifests.
1787 '''
1787 '''
1788 store = self._storage()
1788 store = self._storage()
1789 r = store.rev(self._node)
1789 r = store.rev(self._node)
1790 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
1790 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
1791 return manifestdict(d)
1791 return manifestdict(d)
1792
1792
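    # A hedged reading sketch (hypothetical `mlog` and `node`): readfast()
    # prefers the cheap delta when the stored delta is against a parent,
    # falling back to a full read otherwise:
    #
    #   ctx = mlog.get(b'', node)
    #   changed = ctx.readfast()   # entries new vs. p1 if the p1 delta is
    #                              # stored, otherwise the full manifest
    #   full = ctx.read()          # always the complete manifest
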
    def find(self, key):
        return self.read().find(key)

@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memtreemanifestctx(object):
    def __init__(self, manifestlog, dir=''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest()

    def _storage(self):
        return self._manifestlog.getstorage(b'')

    def new(self, dir=''):
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self._treemanifest.copy()
        return memmf

    def read(self):
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        def readtree(dir, node):
            return self._manifestlog.get(dir, node).read()
        return self._storage().add(self._treemanifest, transaction, link,
                                   p1, p2, added, removed, readtree=readtree,
                                   match=match)

@interfaceutil.implementer(repository.imanifestrevisionstored)
class treemanifestctx(object):
    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        self._dir = dir
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        #self.p1, self.p2 = store.parents(node)
        #rev = store.rev(node)
        #self.linkrev = store.linkrev(rev)

    def _storage(self):
        narrowmatch = self._manifestlog._narrowmatch
        if not narrowmatch.always():
            if not narrowmatch.visitdir(self._dir[:-1] or '.'):
                return excludedmanifestrevlog(self._dir)
        return self._manifestlog.getstorage(self._dir)

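    # Narrowing sketch (hypothetical narrowspec that includes only path:src):
    # storage lookups then resolve differently per directory:
    #
    #   ctx._storage()   # for self._dir == 'src/'  -> the real dirlog
    #   ctx._storage()   # for self._dir == 'docs/' -> excludedmanifestrevlog,
    #                    # which refuses reads and writes outside the narrowspec
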
    def read(self):
        if self._data is None:
            store = self._storage()
            if self._node == nullid:
                self._data = treemanifest()
            # TODO accessing non-public API
            elif store._treeondisk:
                m = treemanifest(dir=self._dir)
                def gettext():
                    return store.revision(self._node)
                def readsubtree(dir, subm):
                    # Set verify to False since we need to be able to create
                    # subtrees for trees that don't exist on disk.
                    return self._manifestlog.get(dir, subm, verify=False).read()
                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                if self._node in store.fulltextcache:
                    text = pycompat.bytestr(store.fulltextcache[self._node])
                else:
                    text = store.revision(self._node)
                    arraytext = bytearray(text)
                    store.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        return self._node

    def new(self, dir=''):
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
        memmf._treemanifest = self.read().copy()
        return memmf

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        '''
        store = self._storage()
        if shallow:
            r = store.rev(self._node)
            d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
            return manifestdict(d)
        else:
            # Need to perform a slow delta
            r0 = store.deltaparent(store.rev(self._node))
            m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
            m1 = self.read()
            md = treemanifest(dir=self._dir)
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

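    # Shallow-vs-deep sketch (hypothetical tree with a 'lib/' subdirectory):
    # a shallow delta reports 'lib/' as a single 't'-flagged entry, while the
    # default deep form recurses into changed subdirectory manifests:
    #
    #   ctx.readdelta(shallow=True)   # manifestdict, 'lib/' kept as one entry
    #   ctx.readdelta()               # treemanifest with lib/ expanded
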
    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        deltaparent = store.deltaparent(r)
        if (deltaparent != nullrev and
            deltaparent in store.parentrevs(r)):
            return self.readdelta(shallow=shallow)

        if shallow:
            return manifestdict(store.revision(self._node))
        else:
            return self.read()

    def find(self, key):
        return self.read().find(key)

class excludeddir(treemanifest):
    """Stand-in for a directory that is excluded from the repository.

    With narrowing active on a repository that uses treemanifests,
    some of the directory revlogs will be excluded from the resulting
    clone. This is a huge storage win for clients, but means we need
    some sort of pseudo-manifest to surface to internals so we can
    detect a merge conflict outside the narrowspec. That's what this
    class is: it stands in for a directory whose node is known, but
    whose contents are unknown.
    """
    def __init__(self, dir, node):
        super(excludeddir, self).__init__(dir)
        self._node = node
        # Add an empty file, which will be included by iterators and such,
        # appearing as the directory itself (i.e. something like "dir/")
        self._files[''] = node
        self._flags[''] = 't'

    # Manifests outside the narrowspec should never be modified, so avoid
    # copying. This makes a noticeable difference when there are very many
    # directories outside the narrowspec. Also, it makes sense for the copy to
    # be of the same type as the original, which would not happen with the
    # super type's copy().
    def copy(self):
        return self

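# A rough sketch of how an excludeddir surfaces (hypothetical node value):
# iterating it yields roughly one pseudo-entry for the directory itself,
# flagged 't', so merge code can detect conflicts without the real contents:
#
#   d = excludeddir('vendor/', node)
#   list(d)        # -> something like ['vendor/'] (the dir shown as a file)
#   # the single entry carries the 't' flag set in __init__ above
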
class excludeddirmanifestctx(treemanifestctx):
    """context wrapper for excludeddir - see that docstring for rationale"""
    def __init__(self, dir, node):
        self._dir = dir
        self._node = node

    def read(self):
        return excludeddir(self._dir, self._node)

    def write(self, *args):
        raise error.ProgrammingError(
            'attempt to write manifest from excluded dir %s' % self._dir)

class excludedmanifestrevlog(manifestrevlog):
    """Stand-in for excluded treemanifest revlogs.

    When narrowing is active on a treemanifest repository, we'll have
    references to directories we can't see due to the revlog being
    skipped. This class exists to conform to the manifestrevlog
    interface for those directories and proactively prevent writes
    outside the narrowspec.
    """

    def __init__(self, dir):
        self._dir = dir

    def __len__(self):
        raise error.ProgrammingError(
            'attempt to get length of excluded dir %s' % self._dir)

    def rev(self, node):
        raise error.ProgrammingError(
            'attempt to get rev from excluded dir %s' % self._dir)

    def linkrev(self, node):
        raise error.ProgrammingError(
            'attempt to get linkrev from excluded dir %s' % self._dir)

    def node(self, rev):
        raise error.ProgrammingError(
            'attempt to get node from excluded dir %s' % self._dir)

    def add(self, *args, **kwargs):
        # We should never write entries in dirlogs outside the narrow clone.
        # However, the method still gets called from writesubtree() in
        # _addtree(), so we need to handle it. We should possibly make
        # writesubtree() avoid calling add() with a clean manifest (_dirty is
        # always False in excludeddir instances).
        pass
@@ -1,224 +1,225 b''
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from . import (
    changelog,
    error,
    localrepo,
    manifest,
    namespaces,
    pathutil,
    url,
    util,
    vfs as vfsmod,
)

urlerr = util.urlerr
urlreq = util.urlreq

class httprangereader(object):
    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def seek(self, pos):
        self.pos = pos
    def read(self, bytes=None):
        req = urlreq.request(self.url)
        end = ''
        if bytes:
            end = self.pos + bytes - 1
        if self.pos or end:
            req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
            code = f.code
        except urlerr.httperror as inst:
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urlerr.urlerror as inst:
            raise IOError(None, inst.reason[1])

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos:self.pos + bytes]
            else:
                data = data[self.pos:]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data
    def readlines(self):
        return self.read().splitlines(True)
    def __iter__(self):
        return iter(self.readlines())
    def close(self):
        pass

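# Usage sketch (hypothetical URL; `opener` as built by build_opener() below):
# the reader exposes a file-like API over HTTP Range requests, fetching one
# range per read() call:
#
#   reader = httprangereader('http://host/repo/.hg/store/00manifest.i', opener)
#   reader.seek(32)
#   chunk = reader.read(64)    # sends 'Range: bytes=32-95'
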
# _RangeError and _HTTPRangeHandler were originally in byterange.py,
# which was itself extracted from urlgrabber. See the last version of
# byterange.py from history if you need more information.
class _RangeError(IOError):
    """Error raised when an unsatisfiable range is requested."""

class _HTTPRangeHandler(urlreq.basehandler):
    """Handler that enables HTTP Range headers.

    This is extremely simple. The Range header is an HTTP feature to
    begin with, so all this class does is tell urllib2 that the
    "206 Partial Content" response from the HTTP server is what we
    expected.
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content Response
        r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
        r.code = code
        r.msg = msg
        return r

    def http_error_416(self, req, fp, code, msg, hdrs):
        # HTTP's Range Not Satisfiable error
        raise _RangeError('Requested Range Not Satisfiable')

107
108 def build_opener(ui, authinfo):
108 def build_opener(ui, authinfo):
109 # urllib cannot handle URLs with embedded user or passwd
109 # urllib cannot handle URLs with embedded user or passwd
110 urlopener = url.opener(ui, authinfo)
110 urlopener = url.opener(ui, authinfo)
111 urlopener.add_handler(_HTTPRangeHandler())
111 urlopener.add_handler(_HTTPRangeHandler())
112
112
113 class statichttpvfs(vfsmod.abstractvfs):
113 class statichttpvfs(vfsmod.abstractvfs):
114 def __init__(self, base):
114 def __init__(self, base):
115 self.base = base
115 self.base = base
116
116
117 def __call__(self, path, mode='r', *args, **kw):
117 def __call__(self, path, mode='r', *args, **kw):
118 if mode not in ('r', 'rb'):
118 if mode not in ('r', 'rb'):
119 raise IOError('Permission denied')
119 raise IOError('Permission denied')
120 f = "/".join((self.base, urlreq.quote(path)))
120 f = "/".join((self.base, urlreq.quote(path)))
121 return httprangereader(f, urlopener)
121 return httprangereader(f, urlopener)
122
122
123 def join(self, path):
123 def join(self, path):
124 if path:
124 if path:
125 return pathutil.join(self.base, path)
125 return pathutil.join(self.base, path)
126 else:
126 else:
127 return self.base
127 return self.base
128
128
129 return statichttpvfs
129 return statichttpvfs
130
130
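# A hedged sketch of the closure pattern above (hypothetical `ui`, `authinfo`):
# build_opener() returns a vfs *class* whose instances capture `urlopener`,
# so every file handle they hand out does ranged HTTP reads:
#
#   vfsclass = build_opener(ui, authinfo)
#   svfs = vfsclass('http://host/repo/.hg')
#   fp = svfs('store/00changelog.i')   # an httprangereader, not a local file
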
class statichttppeer(localrepo.localpeer):
    def local(self):
        return None
    def canpush(self):
        return False

class statichttprepository(localrepo.localrepository):
    supported = localrepo.localrepository._basesupported

    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        u = util.url(path.rstrip('/') + "/.hg")
        self.path, authinfo = u.authinfo()

        vfsclass = build_opener(ui, authinfo)
        self.vfs = vfsclass(self.path)
        self.cachevfs = vfsclass(self.vfs.join('cache'))
        self._phasedefaults = []

        self.names = namespaces.namespaces()
        self.filtername = None

        try:
            requirements = set(self.vfs.read(b'requires').splitlines())
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = set()

            # check if it is a non-empty old-style repository
            try:
                fp = self.vfs("00changelog.i")
                fp.read(1)
                fp.close()
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # we do not care about empty old-style repositories here
                msg = _("'%s' does not appear to be an hg repository") % path
                raise error.RepoError(msg)

        supportedrequirements = localrepo.gathersupportedrequirements(ui)
        localrepo.ensurerequirementsrecognized(requirements,
                                               supportedrequirements)
        localrepo.ensurerequirementscompatible(ui, requirements)

        # setup store
        self.store = localrepo.makestore(requirements, self.path, vfsclass)
        self.spath = self.store.path
        self.svfs = self.store.opener
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

-        self.manifestlog = manifest.manifestlog(self.svfs, self)
+        rootmanifest = manifest.manifestrevlog(self.svfs)
+        self.manifestlog = manifest.manifestlog(self.svfs, self, rootmanifest)
        self.changelog = changelog.changelog(self.svfs)
        self._tags = None
        self.nodetagscache = None
        self._branchcaches = {}
        self._revbranchcache = None
        self.encodepats = None
        self.decodepats = None
        self._transref = None

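    # Sketch of the construction pattern introduced above (assumed shape):
    # callers now build the root manifest storage themselves and hand it to
    # manifestlog, instead of manifestlog creating its own revlog:
    #
    #   rootstore = manifest.manifestrevlog(svfs)
    #   mlog = manifest.manifestlog(svfs, repo, rootstore)
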
    def _restrictcapabilities(self, caps):
        caps = super(statichttprepository, self)._restrictcapabilities(caps)
        return caps.difference(["pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self):
        return statichttppeer(self)

    def wlock(self, wait=True):
        raise error.LockUnavailable(0, _('lock not available'), 'lock',
                                    _('cannot lock static-http repository'))

    def lock(self, wait=True):
        raise error.Abort(_('cannot lock static-http repository'))

    def _writecaches(self):
        pass # static-http repositories are read-only

def instance(ui, path, create, intents=None, createopts=None):
    if create:
        raise error.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
@@ -1,284 +1,290 b''
# unionrepo.py - repository class for viewing union of repository changesets
#
# Derived from bundlerepo.py
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
# Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Repository class for "in-memory pull" of one local repository to another,
allowing operations like diff and log with revsets.
"""

from __future__ import absolute_import

from .i18n import _
from .node import nullid

from . import (
    changelog,
    cmdutil,
    error,
    filelog,
    localrepo,
    manifest,
    mdiff,
    pathutil,
    pycompat,
    revlog,
    util,
    vfs as vfsmod,
)

class unionrevlog(revlog.revlog):
    def __init__(self, opener, indexfile, revlog2, linkmapper):
        # How it works:
        # To retrieve a revision, we just need to know the node id so we can
        # look it up in revlog2.
        #
        # To tell a rev in the second revlog from a rev in the first, we
        # compare the revision number against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.revlog2 = revlog2

        n = len(self)
        self.repotiprev = n - 1
        self.bundlerevs = set() # used by 'bundle()' revset expression
        for rev2 in self.revlog2:
            rev = self.revlog2.index[rev2]
            # rev numbers in revlog2 differ from those in self
            _start, _csize, rsize, base, linkrev, p1rev, p2rev, node = rev
            flags = _start & 0xFFFF

            if linkmapper is None: # link is to same revlog
                assert linkrev == rev2 # we never link back
                link = n
            else: # rev must be mapped from repo2 cl to unified cl by linkmapper
                link = linkmapper(linkrev)

            if linkmapper is not None: # the base rev must be mapped as well
                base = linkmapper(base)

            if node in self.nodemap:
                # this happens for the common revlog revisions
                self.bundlerevs.add(self.nodemap[node])
                continue

            p1node = self.revlog2.node(p1rev)
            p2node = self.revlog2.node(p2rev)

            # TODO: it's probably wrong to set compressed length to None, but
            # I have no idea if csize is valid in the base revlog context.
            e = (flags, None, rsize, base,
                 link, self.rev(p1node), self.rev(p2node), node)
            self.index.append(e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            n += 1

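    # Numbering sketch (hypothetical sizes): with 10 revs in the base revlog,
    # repotiprev is 9; revisions merged in from revlog2 get appended as revs
    # 10, 11, ..., so any rev > repotiprev is served from revlog2:
    #
    #   rl = unionrevlog(opener, indexfile, revlog2, linkmapper)
    #   rl.revision(5)    # read from the base revlog
    #   rl.revision(12)   # node looked up and read from revlog2
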
    def _chunk(self, rev):
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        return self.revlog2._chunk(self.node(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            return self.revlog2.revdiff(
                self.revlog2.rev(self.node(rev1)),
                self.revlog2.rev(self.node(rev2)))
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return self.baserevdiff(rev1, rev2)

        return mdiff.textdiff(self.revision(rev1), self.revision(rev2))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = self.rev(node)

        if node == nullid:
            return ""

        if rev > self.repotiprev:
            text = self.revlog2.revision(node)
            self._cache = (node, rev, text)
        else:
            text = self.baserevision(rev)
            # already cached
        return text

    def baserevision(self, nodeorrev):
        # Revlog subclasses may override the 'revision' method to modify the
        # format of content retrieved from the revlog. To use unionrevlog with
        # such a class, one needs to override 'baserevision' and make a more
        # specific call here.
        return revlog.revlog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        # Exists for the same purpose as baserevision.
        return revlog.revlog.revdiff(self, rev1, rev2)

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        raise NotImplementedError
    def addgroup(self, deltas, transaction, addrevisioncb=None):
        raise NotImplementedError
    def strip(self, rev, minlink):
        raise NotImplementedError
    def checksize(self):
        raise NotImplementedError

class unionchangelog(unionrevlog, changelog.changelog):
    def __init__(self, opener, opener2):
        changelog.changelog.__init__(self, opener)
        linkmapper = None
        changelog2 = changelog.changelog(opener2)
        unionrevlog.__init__(self, opener, self.indexfile, changelog2,
                             linkmapper)

    def baserevision(self, nodeorrev):
        # Although changelog doesn't override the 'revision' method, some
        # extensions may replace this class with another that does. Same story
        # with the manifest and filelog classes.
        return changelog.changelog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return changelog.changelog.revdiff(self, rev1, rev2)

class unionmanifest(unionrevlog, manifest.manifestrevlog):
    def __init__(self, opener, opener2, linkmapper):
        manifest.manifestrevlog.__init__(self, opener)
        manifest2 = manifest.manifestrevlog(opener2)
        unionrevlog.__init__(self, opener, self.indexfile, manifest2,
                             linkmapper)

    def baserevision(self, nodeorrev):
        return manifest.manifestrevlog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return manifest.manifestrevlog.revdiff(self, rev1, rev2)

class unionfilelog(filelog.filelog):
    def __init__(self, opener, path, opener2, linkmapper, repo):
        filelog.filelog.__init__(self, opener, path)
        filelog2 = filelog.filelog(opener2, path)
        self._revlog = unionrevlog(opener, self.indexfile,
                                   filelog2._revlog, linkmapper)
        self._repo = repo
        self.repotiprev = self._revlog.repotiprev
        self.revlog2 = self._revlog.revlog2

    def baserevision(self, nodeorrev):
        return filelog.filelog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return filelog.filelog.revdiff(self, rev1, rev2)

    def iscensored(self, rev):
        """Check if a revision is censored."""
        if rev <= self.repotiprev:
            return filelog.filelog.iscensored(self, rev)
        node = self.node(rev)
        return self.revlog2.iscensored(self.revlog2.rev(node))

class unionpeer(localrepo.localpeer):
    def canpush(self):
        return False

class unionrepository(object):
    """Represents the union of data in 2 repositories.

    Instances are not usable if constructed directly. Use ``instance()``
    or ``makeunionrepository()`` to create a usable instance.
    """
    def __init__(self, repo2, url):
        self.repo2 = repo2
        self._url = url

        self.ui.setconfig('phases', 'publish', False, 'unionrepo')

    @localrepo.unfilteredpropertycache
    def changelog(self):
        return unionchangelog(self.svfs, self.repo2.svfs)

+    @localrepo.unfilteredpropertycache
+    def manifestlog(self):
+        rootstore = unionmanifest(self.svfs, self.repo2.svfs,
+                                  self.unfiltered()._clrev)
+        return manifest.manifestlog(self.svfs, self, rootstore)
+
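    # Sketch of what the added property provides (assumed call shape): the
    # union's root manifest store is built once per unfiltered repo and passed
    # into manifestlog, mirroring the statichttprepo change in this commit:
    #
    #   mlog = repo.manifestlog            # property above
    #   ctx = mlog.get(b'', somenode)      # reads via the unionmanifest store
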
    def _clrev(self, rev2):
        """map from repo2 changelog rev to temporary rev in self.changelog"""
        node = self.repo2.changelog.node(rev2)
        return self.changelog.rev(node)

    def _constructmanifest(self):
        return unionmanifest(self.svfs, self.repo2.svfs,
                             self.unfiltered()._clrev)

    def url(self):
        return self._url

    def file(self, f):
        return unionfilelog(self.svfs, f, self.repo2.svfs,
                            self.unfiltered()._clrev, self)

    def close(self):
        self.repo2.close()

    def cancopy(self):
        return False

    def peer(self):
        return unionpeer(self)

    def getcwd(self):
        return pycompat.getcwd() # always outside the repo

def instance(ui, path, create, intents=None, createopts=None):
    if create:
        raise error.Abort(_('cannot create new union repository'))
    parentpath = ui.config("bundle", "mainreporoot")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(pycompat.getcwd())
        if parentpath is None:
            parentpath = ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = pycompat.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    if path.startswith('union:'):
        s = path.split(":", 1)[1].split("+", 1)
        if len(s) == 1:
            repopath, repopath2 = parentpath, s[0]
        else:
            repopath, repopath2 = s
    else:
        repopath, repopath2 = parentpath, path

    return makeunionrepository(ui, repopath, repopath2)

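# Path-form sketch (hypothetical paths): instance() accepts either an explicit
# pair or a single path that is unioned with the enclosing repo:
#
#   'union:/srv/repo1+/srv/repo2' -> repopath='/srv/repo1', repopath2='/srv/repo2'
#   'union:/srv/repo2'            -> repopath=<enclosing repo>, repopath2='/srv/repo2'
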
def makeunionrepository(ui, repopath1, repopath2):
    """Make a union repository object from 2 local repo paths."""
    repo1 = localrepo.instance(ui, repopath1, create=False)
    repo2 = localrepo.instance(ui, repopath2, create=False)

    url = 'union:%s+%s' % (util.expandpath(repopath1),
                           util.expandpath(repopath2))

    class derivedunionrepository(unionrepository, repo1.__class__):
        pass

    repo = repo1
    repo.__class__ = derivedunionrepository
    unionrepository.__init__(repo1, repo2, url)

    return repo
@@ -1,230 +1,230 b''
# Test that certain objects conform to well-defined interfaces.

from __future__ import absolute_import, print_function

from mercurial import encoding
encoding.environ[b'HGREALINTERFACES'] = b'1'

import os
import subprocess
import sys

# Only run if tests are run in a repo
if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
                    'test-repo']):
    sys.exit(80)

from mercurial.thirdparty.zope import (
    interface as zi,
)
from mercurial.thirdparty.zope.interface import (
    verify as ziverify,
)
from mercurial import (
    changegroup,
    bundlerepo,
    filelog,
    httppeer,
    localrepo,
    manifest,
    pycompat,
    repository,
    revlog,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    vfs as vfsmod,
    wireprotoserver,
    wireprototypes,
    wireprotov1peer,
    wireprotov2server,
)

rootdir = pycompat.fsencode(
    os.path.normpath(os.path.join(os.path.dirname(__file__), '..')))

def checkzobject(o, allowextra=False):
    """Verify an object with a zope interface."""
    ifaces = zi.providedBy(o)
    if not ifaces:
        print('%r does not provide any zope interfaces' % o)
        return

    # Run zope.interface's built-in verification routine. This verifies that
    # everything that is supposed to be present is present.
    for iface in ifaces:
        ziverify.verifyObject(iface, o)

    if allowextra:
        return

    # Now verify that the object provides no extra public attributes that
    # aren't declared as part of interfaces.
    allowed = set()
    for iface in ifaces:
        allowed |= set(iface.names(all=True))

    public = {a for a in dir(o) if not a.startswith('_')}

    for attr in sorted(public - allowed):
        print('public attribute not declared in interfaces: %s.%s' % (
            o.__class__.__name__, attr))

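# A hedged sketch of what checkzobject flags (hypothetical class): an
# interface violation raises via ziverify.verifyObject, while a public
# attribute that no declared interface names is merely printed:
#
#   @zi.implementer(repository.ipeerbase)
#   class leakypeer(object):
#       extra = 1                  # not declared on ipeerbase
#       ...                        # plus the required ipeerbase members
#   checkzobject(leakypeer())      # prints 'public attribute ... extra'
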
74 # Facilitates testing localpeer.
74 # Facilitates testing localpeer.
75 class dummyrepo(object):
75 class dummyrepo(object):
76 def __init__(self):
76 def __init__(self):
77 self.ui = uimod.ui()
77 self.ui = uimod.ui()
78 def filtered(self, name):
78 def filtered(self, name):
79 pass
79 pass
80 def _restrictcapabilities(self, caps):
80 def _restrictcapabilities(self, caps):
81 pass
81 pass
82
82
83 class dummyopener(object):
83 class dummyopener(object):
84 handlers = []
84 handlers = []
85
85
86 # Facilitates testing sshpeer without requiring a server.
86 # Facilitates testing sshpeer without requiring a server.
87 class badpeer(httppeer.httppeer):
87 class badpeer(httppeer.httppeer):
88 def __init__(self):
88 def __init__(self):
89 super(badpeer, self).__init__(None, None, None, dummyopener(), None,
89 super(badpeer, self).__init__(None, None, None, dummyopener(), None,
90 None)
90 None)
91 self.badattribute = True
91 self.badattribute = True
92
92
93 def badmethod(self):
93 def badmethod(self):
94 pass
94 pass
95
95
96 class dummypipe(object):
96 class dummypipe(object):
97 def close(self):
97 def close(self):
98 pass
98 pass
99
99
100 def main():
100 def main():
101 ui = uimod.ui()
101 ui = uimod.ui()
102 # Needed so we can open a local repo with obsstore without a warning.
102 # Needed so we can open a local repo with obsstore without a warning.
103 ui.setconfig('experimental', 'evolution.createmarkers', True)
103 ui.setconfig('experimental', 'evolution.createmarkers', True)
104
104
105 checkzobject(badpeer())
105 checkzobject(badpeer())
106
106
107 ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer)
107 ziverify.verifyClass(repository.ipeerbase, httppeer.httppeer)
108 checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None))
108 checkzobject(httppeer.httppeer(None, None, None, dummyopener(), None, None))
109
109
    ziverify.verifyClass(repository.ipeerconnection,
                         httppeer.httpv2peer)
    ziverify.verifyClass(repository.ipeercapabilities,
                         httppeer.httpv2peer)
    checkzobject(httppeer.httpv2peer(None, b'', b'', None, None, None))

    ziverify.verifyClass(repository.ipeerbase,
                         localrepo.localpeer)
    checkzobject(localrepo.localpeer(dummyrepo()))

    ziverify.verifyClass(repository.ipeercommandexecutor,
                         localrepo.localcommandexecutor)
    checkzobject(localrepo.localcommandexecutor(None))

    ziverify.verifyClass(repository.ipeercommandexecutor,
                         wireprotov1peer.peerexecutor)
    checkzobject(wireprotov1peer.peerexecutor(None))

    ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv1peer)
    checkzobject(sshpeer.sshv1peer(ui, b'ssh://localhost/foo', b'', dummypipe(),
                                   dummypipe(), None, None))

    ziverify.verifyClass(repository.ipeerbase, sshpeer.sshv2peer)
    checkzobject(sshpeer.sshv2peer(ui, b'ssh://localhost/foo', b'', dummypipe(),
                                   dummypipe(), None, None))

    ziverify.verifyClass(repository.ipeerbase, bundlerepo.bundlepeer)
    checkzobject(bundlerepo.bundlepeer(dummyrepo()))

    ziverify.verifyClass(repository.ipeerbase, statichttprepo.statichttppeer)
    checkzobject(statichttprepo.statichttppeer(dummyrepo()))

    ziverify.verifyClass(repository.ipeerbase, unionrepo.unionpeer)
    checkzobject(unionrepo.unionpeer(dummyrepo()))

    ziverify.verifyClass(repository.completelocalrepository,
                         localrepo.localrepository)
    repo = localrepo.makelocalrepository(ui, rootdir)
    checkzobject(repo)

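    # ``rootdir`` is assumed to be defined at module scope earlier in this
    # file; makelocalrepository() builds a real localrepository instance for
    # the instance-level checks.
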
    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotoserver.sshv1protocolhandler)
    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotoserver.sshv2protocolhandler)
    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotoserver.httpv1protocolhandler)
    ziverify.verifyClass(wireprototypes.baseprotocolhandler,
                         wireprotov2server.httpv2protocolhandler)

    sshv1 = wireprotoserver.sshv1protocolhandler(None, None, None)
    checkzobject(sshv1)
    sshv2 = wireprotoserver.sshv2protocolhandler(None, None, None)
    checkzobject(sshv2)

    httpv1 = wireprotoserver.httpv1protocolhandler(None, None, None)
    checkzobject(httpv1)
    httpv2 = wireprotov2server.httpv2protocolhandler(None, None)
    checkzobject(httpv2)

    ziverify.verifyClass(repository.ifilestorage, filelog.filelog)
    ziverify.verifyClass(repository.imanifestdict, manifest.manifestdict)
    ziverify.verifyClass(repository.imanifestrevisionstored,
                         manifest.manifestctx)
    ziverify.verifyClass(repository.imanifestrevisionwritable,
                         manifest.memmanifestctx)
    ziverify.verifyClass(repository.imanifestrevisionstored,
                         manifest.treemanifestctx)
    ziverify.verifyClass(repository.imanifestrevisionwritable,
                         manifest.memtreemanifestctx)
    ziverify.verifyClass(repository.imanifestlog, manifest.manifestlog)
    ziverify.verifyClass(repository.imanifeststorage, manifest.manifestrevlog)

    vfs = vfsmod.vfs(b'.')
    fl = filelog.filelog(vfs, b'dummy.i')
    checkzobject(fl, allowextra=True)

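    # allowextra=True presumably tolerates public attributes on filelog
    # that ifilestorage does not (yet) declare.
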
    # Conforms to imanifestlog.
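    # The third argument supplies the root manifest storage up front: a
    # manifestrevlog opened over repo.svfs, rather than one constructed by
    # manifestlog itself.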
    ml = manifest.manifestlog(vfs, repo, manifest.manifestrevlog(repo.svfs))
    checkzobject(ml)
    checkzobject(repo.manifestlog)

    # Conforms to imanifestrevision.
    mctx = ml[repo[0].manifestnode()]
    checkzobject(mctx)

    # Conforms to imanifestrevisionwritable.
    checkzobject(mctx.new())
    checkzobject(mctx.copy())

    # Conforms to imanifestdict.
    checkzobject(mctx.read())

    mrl = manifest.manifestrevlog(vfs)
    checkzobject(mrl)

    ziverify.verifyClass(repository.irevisiondelta,
                         revlog.revlogrevisiondelta)
    ziverify.verifyClass(repository.irevisiondeltarequest,
                         changegroup.revisiondeltarequest)

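    # The b'' / None placeholders below are sufficient: the interface
    # checks only look at which attributes and methods are present, not at
    # whether their values are semantically meaningful.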
    rd = revlog.revlogrevisiondelta(
        node=b'',
        p1node=b'',
        p2node=b'',
        basenode=b'',
        linknode=b'',
        flags=b'',
        baserevisionsize=None,
        revision=b'',
        delta=None)
    checkzobject(rd)

    rdr = changegroup.revisiondeltarequest(
        node=b'',
        linknode=b'',
        p1node=b'',
        p2node=b'',
        basenode=b'')
    checkzobject(rdr)

main()
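# This script is presumably run through Mercurial's test harness
# (tests/run-tests.py), which compares its stdout against a matching .out
# file, so any unexpected checkzobject() output fails the test.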