py3: introduce and use pycompat.getargspec...
Augie Fackler
r36196:64600233 default
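
This change replaces direct calls to inspect.getargspec(), which is
deprecated on Python 3 (and was later removed), with a module-level
getargspec name that prefers pycompat.getargspec when the running
Mercurial provides it. A minimal sketch of what such a shim does (an
assumption for illustration; the real pycompat module may differ in
detail):

    import inspect
    import sys

    if sys.version_info[0] >= 3:
        # FullArgSpec exposes .args just as the old ArgSpec tuple did
        getargspec = inspect.getfullargspec
    else:
        getargspec = inspect.getargspec

Call sites accordingly read argument names as getargspec(func).args
instead of indexing the result as getargspec(func)[0].
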
@@ -1,1730 +1,1735 @@
# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial

from __future__ import absolute_import
import functools
import gc
import os
import random
import struct
import sys
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
+try:
+    from mercurial import pycompat
+    getargspec = pycompat.getargspec # added to module after 4.5
+except (ImportError, AttributeError):
+    import inspect
+    getargspec = inspect.getargspec

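# Illustrative example (not from the original source): both Python 2's
# ArgSpec and the Python 3 replacement selected by pycompat expose the
# positional argument names at index 0 and as the .args field:
#
#   def f(a, b=None):
#       pass
#   getargspec(f).args   # -> ['a', 'b']
#   getargspec(f)[0]     # -> ['a', 'b'] (the older spelling)
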
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, attr, _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
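
# Illustrative note (an assumption about the rationale): the sentinel-based
# check reports only a genuinely missing attribute as False, e.g.
#
#   safehasattr(time, 'perf_counter')  # False before Python 3.3, True after
#
# whereas Python 2's bare hasattr() also returns False when reading the
# attribute raises an arbitrary exception.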

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
        ('c', 'changelog', False, ('open changelog')),
        ('m', 'manifest', False, ('open manifest')),
        ('', 'dir', False, ('open directory manifest')),
        ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.lstrip("^").split("|")
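
# Example: a cmdtable key may carry a leading "^" marker (list the command
# in short help) plus "|"-separated aliases, so parsealiases("^perf|p")
# returns ['perf', 'p'].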

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
-    import inspect
    command = cmdutil.command(cmdtable)
-    if 'norepo' not in inspect.getargspec(command)[0]:
+    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += ' %s' % ' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += ' %s' % ' '.join(parsealiases(name))
            return func
        return decorator
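
# Whichever branch above ran, perf commands register uniformly; a hedged
# usage sketch (the command name here is hypothetical):
#
#   @command('perfexample', formatteropts, norepo=True)
#   def perfexample(ui, **opts):
#       pass
#
# "norepo" is honored natively where supported, or via the commands.norepo
# string on older Mercurial.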

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem('perf', 'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem('perf', 'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem('perf', 'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass
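
# Note (interpretation, not from the source): dynamicdefault marks these
# items as having defaults computed at the lookup site, which is why the
# reads below still pass explicit fallbacks, e.g.
# getint(ui, "perf", "presleep", 1).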

def getlen(ui):
    if ui.configbool("perf", "stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, "perf", "presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter('perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, 'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool("perf", "stub", False):
        return functools.partial(stub_timer, fm), fm
    return functools.partial(_timer, fm), fm

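# Typical call pattern for gettimer() (sketch; the callable is
# hypothetical):
#
#   timer, fm = gettimer(ui, opts)
#   timer(lambda: operation_under_test())
#   fm.end()
#
# so formatter creation and the presleep idle period live in one place.
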
def stub_timer(fm, func, title=None):
    func()

def _timer(fm, func, title=None):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        ostart = os.times()
        cstart = util.timer()
        r = func()
        cstop = util.timer()
        ostop = os.times()
        count += 1
        a, b = ostart, ostop
        results.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    fm.startitem()

    if title:
        fm.write('title', '! %s\n', title)
    if r:
        fm.write('result', '! result: %s\n', r)
    m = min(results)
    fm.plain('!')
    fm.write('wall', ' wall %f', m[0])
    fm.write('comb', ' comb %f', m[1] + m[2])
    fm.write('user', ' user %f', m[1])
    fm.write('sys', ' sys %f', m[2])
    fm.write('count', ' (best of %d)', count)
    fm.plain('\n')
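
# Termination note for _timer() (explanatory comment): sampling repeats
# until more than 3 seconds have elapsed with at least 100 runs, or more
# than 10 seconds with at least 3 runs; min(results) then reports the
# fastest sample ("best of N"), which damps scheduler and cache noise.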

# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(("%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(("missing attribute %s of %s might break assumption"
                           " of performance measurement") % (name, obj))

    origvalue = getattr(obj, name)
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, name, newvalue)
        def restore(self):
            setattr(obj, name, origvalue)

    return attrutil()
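
# Usage sketch for safeattrsetter() (attribute choice is illustrative):
#
#   fout = safeattrsetter(ui, 'fout', ignoremissing=True)
#   if fout:
#       fout.set(ui.ferr)   # divert output
#       ...                 # measure something
#       fout.restore()      # put the original value back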
308
313
309 # utilities to examine each internal API changes
314 # utilities to examine each internal API changes
310
315
311 def getbranchmapsubsettable():
316 def getbranchmapsubsettable():
312 # for "historical portability":
317 # for "historical portability":
313 # subsettable is defined in:
318 # subsettable is defined in:
314 # - branchmap since 2.9 (or 175c6fd8cacc)
319 # - branchmap since 2.9 (or 175c6fd8cacc)
315 # - repoview since 2.5 (or 59a9f18d4587)
320 # - repoview since 2.5 (or 59a9f18d4587)
316 for mod in (branchmap, repoview):
321 for mod in (branchmap, repoview):
317 subsettable = getattr(mod, 'subsettable', None)
322 subsettable = getattr(mod, 'subsettable', None)
318 if subsettable:
323 if subsettable:
319 return subsettable
324 return subsettable
320
325
321 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
326 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
322 # branchmap and repoview modules exist, but subsettable attribute
327 # branchmap and repoview modules exist, but subsettable attribute
323 # doesn't)
328 # doesn't)
324 raise error.Abort(("perfbranchmap not available with this Mercurial"),
329 raise error.Abort(("perfbranchmap not available with this Mercurial"),
325 hint="use 2.5 or later")
330 hint="use 2.5 or later")
326
331
327 def getsvfs(repo):
332 def getsvfs(repo):
328 """Return appropriate object to access files under .hg/store
333 """Return appropriate object to access files under .hg/store
329 """
334 """
330 # for "historical portability":
335 # for "historical portability":
331 # repo.svfs has been available since 2.3 (or 7034365089bf)
336 # repo.svfs has been available since 2.3 (or 7034365089bf)
332 svfs = getattr(repo, 'svfs', None)
337 svfs = getattr(repo, 'svfs', None)
333 if svfs:
338 if svfs:
334 return svfs
339 return svfs
335 else:
340 else:
336 return getattr(repo, 'sopener')
341 return getattr(repo, 'sopener')
337
342
338 def getvfs(repo):
343 def getvfs(repo):
339 """Return appropriate object to access files under .hg
344 """Return appropriate object to access files under .hg
340 """
345 """
341 # for "historical portability":
346 # for "historical portability":
342 # repo.vfs has been available since 2.3 (or 7034365089bf)
347 # repo.vfs has been available since 2.3 (or 7034365089bf)
343 vfs = getattr(repo, 'vfs', None)
348 vfs = getattr(repo, 'vfs', None)
344 if vfs:
349 if vfs:
345 return vfs
350 return vfs
346 else:
351 else:
347 return getattr(repo, 'opener')
352 return getattr(repo, 'opener')
348
353
349 def repocleartagscachefunc(repo):
354 def repocleartagscachefunc(repo):
350 """Return the function to clear tags cache according to repo internal API
355 """Return the function to clear tags cache according to repo internal API
351 """
356 """
352 if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
357 if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
353 # in this case, setattr(repo, '_tagscache', None) or so isn't
358 # in this case, setattr(repo, '_tagscache', None) or so isn't
354 # correct way to clear tags cache, because existing code paths
359 # correct way to clear tags cache, because existing code paths
355 # expect _tagscache to be a structured object.
360 # expect _tagscache to be a structured object.
356 def clearcache():
361 def clearcache():
357 # _tagscache has been filteredpropertycache since 2.5 (or
362 # _tagscache has been filteredpropertycache since 2.5 (or
358 # 98c867ac1330), and delattr() can't work in such case
363 # 98c867ac1330), and delattr() can't work in such case
359 if '_tagscache' in vars(repo):
364 if '_tagscache' in vars(repo):
360 del repo.__dict__['_tagscache']
365 del repo.__dict__['_tagscache']
361 return clearcache
366 return clearcache
362
367
363 repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
368 repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
364 if repotags: # since 1.4 (or 5614a628d173)
369 if repotags: # since 1.4 (or 5614a628d173)
365 return lambda : repotags.set(None)
370 return lambda : repotags.set(None)
366
371
367 repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
372 repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
368 if repotagscache: # since 0.6 (or d7df759d0e97)
373 if repotagscache: # since 0.6 (or d7df759d0e97)
369 return lambda : repotagscache.set(None)
374 return lambda : repotagscache.set(None)
370
375
371 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
376 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
372 # this point, but it isn't so problematic, because:
377 # this point, but it isn't so problematic, because:
373 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
378 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
374 # in perftags() causes failure soon
379 # in perftags() causes failure soon
375 # - perf.py itself has been available since 1.1 (or eb240755386d)
380 # - perf.py itself has been available since 1.1 (or eb240755386d)
376 raise error.Abort(("tags API of this hg command is unknown"))
381 raise error.Abort(("tags API of this hg command is unknown"))
377
382
378 # utilities to clear cache
383 # utilities to clear cache
379
384
380 def clearfilecache(repo, attrname):
385 def clearfilecache(repo, attrname):
381 unfi = repo.unfiltered()
386 unfi = repo.unfiltered()
382 if attrname in vars(unfi):
387 if attrname in vars(unfi):
383 delattr(unfi, attrname)
388 delattr(unfi, attrname)
384 unfi._filecache.pop(attrname, None)
389 unfi._filecache.pop(attrname, None)
385
390
386 # perf commands
391 # perf commands
387
392
388 @command('perfwalk', formatteropts)
393 @command('perfwalk', formatteropts)
389 def perfwalk(ui, repo, *pats, **opts):
394 def perfwalk(ui, repo, *pats, **opts):
390 timer, fm = gettimer(ui, opts)
395 timer, fm = gettimer(ui, opts)
391 m = scmutil.match(repo[None], pats, {})
396 m = scmutil.match(repo[None], pats, {})
392 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
397 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
393 ignored=False))))
398 ignored=False))))
394 fm.end()
399 fm.end()
395
400
396 @command('perfannotate', formatteropts)
401 @command('perfannotate', formatteropts)
397 def perfannotate(ui, repo, f, **opts):
402 def perfannotate(ui, repo, f, **opts):
398 timer, fm = gettimer(ui, opts)
403 timer, fm = gettimer(ui, opts)
399 fc = repo['.'][f]
404 fc = repo['.'][f]
400 timer(lambda: len(fc.annotate(True)))
405 timer(lambda: len(fc.annotate(True)))
401 fm.end()
406 fm.end()
402
407
403 @command('perfstatus',
408 @command('perfstatus',
404 [('u', 'unknown', False,
409 [('u', 'unknown', False,
405 'ask status to look for unknown files')] + formatteropts)
410 'ask status to look for unknown files')] + formatteropts)
406 def perfstatus(ui, repo, **opts):
411 def perfstatus(ui, repo, **opts):
407 #m = match.always(repo.root, repo.getcwd())
412 #m = match.always(repo.root, repo.getcwd())
408 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
413 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
409 # False))))
414 # False))))
410 timer, fm = gettimer(ui, opts)
415 timer, fm = gettimer(ui, opts)
411 timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
416 timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
412 fm.end()
417 fm.end()
413
418
414 @command('perfaddremove', formatteropts)
419 @command('perfaddremove', formatteropts)
415 def perfaddremove(ui, repo, **opts):
420 def perfaddremove(ui, repo, **opts):
416 timer, fm = gettimer(ui, opts)
421 timer, fm = gettimer(ui, opts)
417 try:
422 try:
418 oldquiet = repo.ui.quiet
423 oldquiet = repo.ui.quiet
419 repo.ui.quiet = True
424 repo.ui.quiet = True
420 matcher = scmutil.match(repo[None])
425 matcher = scmutil.match(repo[None])
421 timer(lambda: scmutil.addremove(repo, matcher, "", dry_run=True))
426 timer(lambda: scmutil.addremove(repo, matcher, "", dry_run=True))
422 finally:
427 finally:
423 repo.ui.quiet = oldquiet
428 repo.ui.quiet = oldquiet
424 fm.end()
429 fm.end()
425
430
426 def clearcaches(cl):
431 def clearcaches(cl):
427 # behave somewhat consistently across internal API changes
432 # behave somewhat consistently across internal API changes
428 if util.safehasattr(cl, 'clearcaches'):
433 if util.safehasattr(cl, 'clearcaches'):
429 cl.clearcaches()
434 cl.clearcaches()
430 elif util.safehasattr(cl, '_nodecache'):
435 elif util.safehasattr(cl, '_nodecache'):
431 from mercurial.node import nullid, nullrev
436 from mercurial.node import nullid, nullrev
432 cl._nodecache = {nullid: nullrev}
437 cl._nodecache = {nullid: nullrev}
433 cl._nodepos = None
438 cl._nodepos = None
434
439
435 @command('perfheads', formatteropts)
440 @command('perfheads', formatteropts)
436 def perfheads(ui, repo, **opts):
441 def perfheads(ui, repo, **opts):
437 timer, fm = gettimer(ui, opts)
442 timer, fm = gettimer(ui, opts)
438 cl = repo.changelog
443 cl = repo.changelog
439 def d():
444 def d():
440 len(cl.headrevs())
445 len(cl.headrevs())
441 clearcaches(cl)
446 clearcaches(cl)
442 timer(d)
447 timer(d)
443 fm.end()
448 fm.end()
444
449
445 @command('perftags', formatteropts)
450 @command('perftags', formatteropts)
446 def perftags(ui, repo, **opts):
451 def perftags(ui, repo, **opts):
447 import mercurial.changelog
452 import mercurial.changelog
448 import mercurial.manifest
453 import mercurial.manifest
449 timer, fm = gettimer(ui, opts)
454 timer, fm = gettimer(ui, opts)
450 svfs = getsvfs(repo)
455 svfs = getsvfs(repo)
451 repocleartagscache = repocleartagscachefunc(repo)
456 repocleartagscache = repocleartagscachefunc(repo)
452 def t():
457 def t():
453 repo.changelog = mercurial.changelog.changelog(svfs)
458 repo.changelog = mercurial.changelog.changelog(svfs)
454 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)
459 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)
455 repocleartagscache()
460 repocleartagscache()
456 return len(repo.tags())
461 return len(repo.tags())
457 timer(t)
462 timer(t)
458 fm.end()
463 fm.end()
459
464
460 @command('perfancestors', formatteropts)
465 @command('perfancestors', formatteropts)
461 def perfancestors(ui, repo, **opts):
466 def perfancestors(ui, repo, **opts):
462 timer, fm = gettimer(ui, opts)
467 timer, fm = gettimer(ui, opts)
463 heads = repo.changelog.headrevs()
468 heads = repo.changelog.headrevs()
464 def d():
469 def d():
465 for a in repo.changelog.ancestors(heads):
470 for a in repo.changelog.ancestors(heads):
466 pass
471 pass
467 timer(d)
472 timer(d)
468 fm.end()
473 fm.end()
469
474
470 @command('perfancestorset', formatteropts)
475 @command('perfancestorset', formatteropts)
471 def perfancestorset(ui, repo, revset, **opts):
476 def perfancestorset(ui, repo, revset, **opts):
472 timer, fm = gettimer(ui, opts)
477 timer, fm = gettimer(ui, opts)
473 revs = repo.revs(revset)
478 revs = repo.revs(revset)
474 heads = repo.changelog.headrevs()
479 heads = repo.changelog.headrevs()
475 def d():
480 def d():
476 s = repo.changelog.ancestors(heads)
481 s = repo.changelog.ancestors(heads)
477 for rev in revs:
482 for rev in revs:
478 rev in s
483 rev in s
479 timer(d)
484 timer(d)
480 fm.end()
485 fm.end()
481
486
482 @command('perfbookmarks', formatteropts)
487 @command('perfbookmarks', formatteropts)
483 def perfbookmarks(ui, repo, **opts):
488 def perfbookmarks(ui, repo, **opts):
484 """benchmark parsing bookmarks from disk to memory"""
489 """benchmark parsing bookmarks from disk to memory"""
485 timer, fm = gettimer(ui, opts)
490 timer, fm = gettimer(ui, opts)
486 def d():
491 def d():
487 clearfilecache(repo, '_bookmarks')
492 clearfilecache(repo, '_bookmarks')
488 repo._bookmarks
493 repo._bookmarks
489 timer(d)
494 timer(d)
490 fm.end()
495 fm.end()
491
496
492 @command('perfbundleread', formatteropts, 'BUNDLE')
497 @command('perfbundleread', formatteropts, 'BUNDLE')
493 def perfbundleread(ui, repo, bundlepath, **opts):
498 def perfbundleread(ui, repo, bundlepath, **opts):
494 """Benchmark reading of bundle files.
499 """Benchmark reading of bundle files.
495
500
496 This command is meant to isolate the I/O part of bundle reading as
501 This command is meant to isolate the I/O part of bundle reading as
497 much as possible.
502 much as possible.
498 """
503 """
499 from mercurial import (
504 from mercurial import (
500 bundle2,
505 bundle2,
501 exchange,
506 exchange,
502 streamclone,
507 streamclone,
503 )
508 )
504
509
505 def makebench(fn):
510 def makebench(fn):
506 def run():
511 def run():
507 with open(bundlepath, 'rb') as fh:
512 with open(bundlepath, 'rb') as fh:
508 bundle = exchange.readbundle(ui, fh, bundlepath)
513 bundle = exchange.readbundle(ui, fh, bundlepath)
509 fn(bundle)
514 fn(bundle)
510
515
511 return run
516 return run
512
517
513 def makereadnbytes(size):
518 def makereadnbytes(size):
514 def run():
519 def run():
515 with open(bundlepath, 'rb') as fh:
520 with open(bundlepath, 'rb') as fh:
516 bundle = exchange.readbundle(ui, fh, bundlepath)
521 bundle = exchange.readbundle(ui, fh, bundlepath)
517 while bundle.read(size):
522 while bundle.read(size):
518 pass
523 pass
519
524
520 return run
525 return run
521
526
522 def makestdioread(size):
527 def makestdioread(size):
523 def run():
528 def run():
524 with open(bundlepath, 'rb') as fh:
529 with open(bundlepath, 'rb') as fh:
525 while fh.read(size):
530 while fh.read(size):
526 pass
531 pass
527
532
528 return run
533 return run
529
534
530 # bundle1
535 # bundle1
531
536
532 def deltaiter(bundle):
537 def deltaiter(bundle):
533 for delta in bundle.deltaiter():
538 for delta in bundle.deltaiter():
534 pass
539 pass
535
540
536 def iterchunks(bundle):
541 def iterchunks(bundle):
537 for chunk in bundle.getchunks():
542 for chunk in bundle.getchunks():
538 pass
543 pass
539
544
540 # bundle2
545 # bundle2
541
546
542 def forwardchunks(bundle):
547 def forwardchunks(bundle):
543 for chunk in bundle._forwardchunks():
548 for chunk in bundle._forwardchunks():
544 pass
549 pass
545
550
546 def iterparts(bundle):
551 def iterparts(bundle):
547 for part in bundle.iterparts():
552 for part in bundle.iterparts():
548 pass
553 pass
549
554
550 def iterpartsseekable(bundle):
555 def iterpartsseekable(bundle):
551 for part in bundle.iterparts(seekable=True):
556 for part in bundle.iterparts(seekable=True):
552 pass
557 pass
553
558
554 def seek(bundle):
559 def seek(bundle):
555 for part in bundle.iterparts(seekable=True):
560 for part in bundle.iterparts(seekable=True):
556 part.seek(0, os.SEEK_END)
561 part.seek(0, os.SEEK_END)
557
562
558 def makepartreadnbytes(size):
563 def makepartreadnbytes(size):
559 def run():
564 def run():
560 with open(bundlepath, 'rb') as fh:
565 with open(bundlepath, 'rb') as fh:
561 bundle = exchange.readbundle(ui, fh, bundlepath)
566 bundle = exchange.readbundle(ui, fh, bundlepath)
562 for part in bundle.iterparts():
567 for part in bundle.iterparts():
563 while part.read(size):
568 while part.read(size):
564 pass
569 pass
565
570
566 return run
571 return run
567
572
568 benches = [
573 benches = [
569 (makestdioread(8192), 'read(8k)'),
574 (makestdioread(8192), 'read(8k)'),
570 (makestdioread(16384), 'read(16k)'),
575 (makestdioread(16384), 'read(16k)'),
571 (makestdioread(32768), 'read(32k)'),
576 (makestdioread(32768), 'read(32k)'),
572 (makestdioread(131072), 'read(128k)'),
577 (makestdioread(131072), 'read(128k)'),
573 ]
578 ]
574
579
575 with open(bundlepath, 'rb') as fh:
580 with open(bundlepath, 'rb') as fh:
576 bundle = exchange.readbundle(ui, fh, bundlepath)
581 bundle = exchange.readbundle(ui, fh, bundlepath)
577
582
578 if isinstance(bundle, changegroup.cg1unpacker):
583 if isinstance(bundle, changegroup.cg1unpacker):
579 benches.extend([
584 benches.extend([
580 (makebench(deltaiter), 'cg1 deltaiter()'),
585 (makebench(deltaiter), 'cg1 deltaiter()'),
581 (makebench(iterchunks), 'cg1 getchunks()'),
586 (makebench(iterchunks), 'cg1 getchunks()'),
582 (makereadnbytes(8192), 'cg1 read(8k)'),
587 (makereadnbytes(8192), 'cg1 read(8k)'),
583 (makereadnbytes(16384), 'cg1 read(16k)'),
588 (makereadnbytes(16384), 'cg1 read(16k)'),
584 (makereadnbytes(32768), 'cg1 read(32k)'),
589 (makereadnbytes(32768), 'cg1 read(32k)'),
585 (makereadnbytes(131072), 'cg1 read(128k)'),
590 (makereadnbytes(131072), 'cg1 read(128k)'),
586 ])
591 ])
587 elif isinstance(bundle, bundle2.unbundle20):
592 elif isinstance(bundle, bundle2.unbundle20):
588 benches.extend([
593 benches.extend([
589 (makebench(forwardchunks), 'bundle2 forwardchunks()'),
594 (makebench(forwardchunks), 'bundle2 forwardchunks()'),
590 (makebench(iterparts), 'bundle2 iterparts()'),
595 (makebench(iterparts), 'bundle2 iterparts()'),
591 (makebench(iterpartsseekable), 'bundle2 iterparts() seekable'),
596 (makebench(iterpartsseekable), 'bundle2 iterparts() seekable'),
592 (makebench(seek), 'bundle2 part seek()'),
597 (makebench(seek), 'bundle2 part seek()'),
593 (makepartreadnbytes(8192), 'bundle2 part read(8k)'),
598 (makepartreadnbytes(8192), 'bundle2 part read(8k)'),
594 (makepartreadnbytes(16384), 'bundle2 part read(16k)'),
599 (makepartreadnbytes(16384), 'bundle2 part read(16k)'),
595 (makepartreadnbytes(32768), 'bundle2 part read(32k)'),
600 (makepartreadnbytes(32768), 'bundle2 part read(32k)'),
596 (makepartreadnbytes(131072), 'bundle2 part read(128k)'),
601 (makepartreadnbytes(131072), 'bundle2 part read(128k)'),
597 ])
602 ])
598 elif isinstance(bundle, streamclone.streamcloneapplier):
603 elif isinstance(bundle, streamclone.streamcloneapplier):
599 raise error.Abort('stream clone bundles not supported')
604 raise error.Abort('stream clone bundles not supported')
600 else:
605 else:
601 raise error.Abort('unhandled bundle type: %s' % type(bundle))
606 raise error.Abort('unhandled bundle type: %s' % type(bundle))
602
607
603 for fn, title in benches:
608 for fn, title in benches:
604 timer, fm = gettimer(ui, opts)
609 timer, fm = gettimer(ui, opts)
605 timer(fn, title=title)
610 timer(fn, title=title)
606 fm.end()
611 fm.end()
607
612
608 @command('perfchangegroupchangelog', formatteropts +
613 @command('perfchangegroupchangelog', formatteropts +
609 [('', 'version', '02', 'changegroup version'),
614 [('', 'version', '02', 'changegroup version'),
610 ('r', 'rev', '', 'revisions to add to changegroup')])
615 ('r', 'rev', '', 'revisions to add to changegroup')])
611 def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts):
616 def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts):
612 """Benchmark producing a changelog group for a changegroup.
617 """Benchmark producing a changelog group for a changegroup.
613
618
614 This measures the time spent processing the changelog during a
619 This measures the time spent processing the changelog during a
615 bundle operation. This occurs during `hg bundle` and on a server
620 bundle operation. This occurs during `hg bundle` and on a server
616 processing a `getbundle` wire protocol request (handles clones
621 processing a `getbundle` wire protocol request (handles clones
617 and pull requests).
622 and pull requests).
618
623
619 By default, all revisions are added to the changegroup.
624 By default, all revisions are added to the changegroup.
620 """
625 """
621 cl = repo.changelog
626 cl = repo.changelog
622 revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
627 revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
623 bundler = changegroup.getbundler(version, repo)
628 bundler = changegroup.getbundler(version, repo)
624
629
625 def lookup(node):
630 def lookup(node):
626 # The real bundler reads the revision in order to access the
631 # The real bundler reads the revision in order to access the
627 # manifest node and files list. Do that here.
632 # manifest node and files list. Do that here.
628 cl.read(node)
633 cl.read(node)
629 return node
634 return node
630
635
631 def d():
636 def d():
632 for chunk in bundler.group(revs, cl, lookup):
637 for chunk in bundler.group(revs, cl, lookup):
633 pass
638 pass
634
639
635 timer, fm = gettimer(ui, opts)
640 timer, fm = gettimer(ui, opts)
636 timer(d)
641 timer(d)
637 fm.end()
642 fm.end()
638
643
639 @command('perfdirs', formatteropts)
644 @command('perfdirs', formatteropts)
640 def perfdirs(ui, repo, **opts):
645 def perfdirs(ui, repo, **opts):
641 timer, fm = gettimer(ui, opts)
646 timer, fm = gettimer(ui, opts)
642 dirstate = repo.dirstate
647 dirstate = repo.dirstate
643 'a' in dirstate
648 'a' in dirstate
644 def d():
649 def d():
645 dirstate.hasdir('a')
650 dirstate.hasdir('a')
646 del dirstate._map._dirs
651 del dirstate._map._dirs
647 timer(d)
652 timer(d)
648 fm.end()
653 fm.end()
649
654
650 @command('perfdirstate', formatteropts)
655 @command('perfdirstate', formatteropts)
651 def perfdirstate(ui, repo, **opts):
656 def perfdirstate(ui, repo, **opts):
652 timer, fm = gettimer(ui, opts)
657 timer, fm = gettimer(ui, opts)
653 "a" in repo.dirstate
658 "a" in repo.dirstate
654 def d():
659 def d():
655 repo.dirstate.invalidate()
660 repo.dirstate.invalidate()
656 "a" in repo.dirstate
661 "a" in repo.dirstate
657 timer(d)
662 timer(d)
658 fm.end()
663 fm.end()
659
664
660 @command('perfdirstatedirs', formatteropts)
665 @command('perfdirstatedirs', formatteropts)
661 def perfdirstatedirs(ui, repo, **opts):
666 def perfdirstatedirs(ui, repo, **opts):
662 timer, fm = gettimer(ui, opts)
667 timer, fm = gettimer(ui, opts)
663 "a" in repo.dirstate
668 "a" in repo.dirstate
664 def d():
669 def d():
665 repo.dirstate.hasdir("a")
670 repo.dirstate.hasdir("a")
666 del repo.dirstate._map._dirs
671 del repo.dirstate._map._dirs
667 timer(d)
672 timer(d)
668 fm.end()
673 fm.end()
669
674
670 @command('perfdirstatefoldmap', formatteropts)
675 @command('perfdirstatefoldmap', formatteropts)
671 def perfdirstatefoldmap(ui, repo, **opts):
676 def perfdirstatefoldmap(ui, repo, **opts):
672 timer, fm = gettimer(ui, opts)
677 timer, fm = gettimer(ui, opts)
673 dirstate = repo.dirstate
678 dirstate = repo.dirstate
674 'a' in dirstate
679 'a' in dirstate
675 def d():
680 def d():
676 dirstate._map.filefoldmap.get('a')
681 dirstate._map.filefoldmap.get('a')
677 del dirstate._map.filefoldmap
682 del dirstate._map.filefoldmap
678 timer(d)
683 timer(d)
679 fm.end()
684 fm.end()
680
685
681 @command('perfdirfoldmap', formatteropts)
686 @command('perfdirfoldmap', formatteropts)
682 def perfdirfoldmap(ui, repo, **opts):
687 def perfdirfoldmap(ui, repo, **opts):
683 timer, fm = gettimer(ui, opts)
688 timer, fm = gettimer(ui, opts)
684 dirstate = repo.dirstate
689 dirstate = repo.dirstate
685 'a' in dirstate
690 'a' in dirstate
686 def d():
691 def d():
687 dirstate._map.dirfoldmap.get('a')
692 dirstate._map.dirfoldmap.get('a')
688 del dirstate._map.dirfoldmap
693 del dirstate._map.dirfoldmap
689 del dirstate._map._dirs
694 del dirstate._map._dirs
690 timer(d)
695 timer(d)
691 fm.end()
696 fm.end()
692
697
693 @command('perfdirstatewrite', formatteropts)
698 @command('perfdirstatewrite', formatteropts)
694 def perfdirstatewrite(ui, repo, **opts):
699 def perfdirstatewrite(ui, repo, **opts):
695 timer, fm = gettimer(ui, opts)
700 timer, fm = gettimer(ui, opts)
696 ds = repo.dirstate
701 ds = repo.dirstate
697 "a" in ds
702 "a" in ds
698 def d():
703 def d():
699 ds._dirty = True
704 ds._dirty = True
700 ds.write(repo.currenttransaction())
705 ds.write(repo.currenttransaction())
701 timer(d)
706 timer(d)
702 fm.end()
707 fm.end()
703
708
704 @command('perfmergecalculate',
709 @command('perfmergecalculate',
705 [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
710 [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
706 def perfmergecalculate(ui, repo, rev, **opts):
711 def perfmergecalculate(ui, repo, rev, **opts):
707 timer, fm = gettimer(ui, opts)
712 timer, fm = gettimer(ui, opts)
708 wctx = repo[None]
713 wctx = repo[None]
709 rctx = scmutil.revsingle(repo, rev, rev)
714 rctx = scmutil.revsingle(repo, rev, rev)
710 ancestor = wctx.ancestor(rctx)
715 ancestor = wctx.ancestor(rctx)
711 # we don't want working dir files to be stat'd in the benchmark, so prime
716 # we don't want working dir files to be stat'd in the benchmark, so prime
712 # that cache
717 # that cache
713 wctx.dirty()
718 wctx.dirty()
714 def d():
719 def d():
715 # acceptremote is True because we don't want prompts in the middle of
720 # acceptremote is True because we don't want prompts in the middle of
716 # our benchmark
721 # our benchmark
717 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
722 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
718 acceptremote=True, followcopies=True)
723 acceptremote=True, followcopies=True)
719 timer(d)
724 timer(d)
720 fm.end()
725 fm.end()
721
726
722 @command('perfpathcopies', [], "REV REV")
727 @command('perfpathcopies', [], "REV REV")
723 def perfpathcopies(ui, repo, rev1, rev2, **opts):
728 def perfpathcopies(ui, repo, rev1, rev2, **opts):
724 timer, fm = gettimer(ui, opts)
729 timer, fm = gettimer(ui, opts)
725 ctx1 = scmutil.revsingle(repo, rev1, rev1)
730 ctx1 = scmutil.revsingle(repo, rev1, rev1)
726 ctx2 = scmutil.revsingle(repo, rev2, rev2)
731 ctx2 = scmutil.revsingle(repo, rev2, rev2)
727 def d():
732 def d():
728 copies.pathcopies(ctx1, ctx2)
733 copies.pathcopies(ctx1, ctx2)
729 timer(d)
734 timer(d)
730 fm.end()
735 fm.end()
731
736
732 @command('perfphases',
737 @command('perfphases',
733 [('', 'full', False, 'include file reading time too'),
738 [('', 'full', False, 'include file reading time too'),
734 ], "")
739 ], "")
735 def perfphases(ui, repo, **opts):
740 def perfphases(ui, repo, **opts):
736 """benchmark phasesets computation"""
741 """benchmark phasesets computation"""
737 timer, fm = gettimer(ui, opts)
742 timer, fm = gettimer(ui, opts)
738 _phases = repo._phasecache
743 _phases = repo._phasecache
739 full = opts.get('full')
744 full = opts.get('full')
740 def d():
745 def d():
741 phases = _phases
746 phases = _phases
742 if full:
747 if full:
743 clearfilecache(repo, '_phasecache')
748 clearfilecache(repo, '_phasecache')
744 phases = repo._phasecache
749 phases = repo._phasecache
745 phases.invalidate()
750 phases.invalidate()
746 phases.loadphaserevs(repo)
751 phases.loadphaserevs(repo)
747 timer(d)
752 timer(d)
748 fm.end()
753 fm.end()
749
754
750 @command('perfmanifest', [], 'REV')
755 @command('perfmanifest', [], 'REV')
751 def perfmanifest(ui, repo, rev, **opts):
756 def perfmanifest(ui, repo, rev, **opts):
752 timer, fm = gettimer(ui, opts)
757 timer, fm = gettimer(ui, opts)
753 ctx = scmutil.revsingle(repo, rev, rev)
758 ctx = scmutil.revsingle(repo, rev, rev)
754 t = ctx.manifestnode()
759 t = ctx.manifestnode()
755 def d():
760 def d():
756 repo.manifestlog.clearcaches()
761 repo.manifestlog.clearcaches()
757 repo.manifestlog[t].read()
762 repo.manifestlog[t].read()
758 timer(d)
763 timer(d)
759 fm.end()
764 fm.end()
760
765
761 @command('perfchangeset', formatteropts)
766 @command('perfchangeset', formatteropts)
762 def perfchangeset(ui, repo, rev, **opts):
767 def perfchangeset(ui, repo, rev, **opts):
763 timer, fm = gettimer(ui, opts)
768 timer, fm = gettimer(ui, opts)
764 n = repo[rev].node()
769 n = repo[rev].node()
765 def d():
770 def d():
766 repo.changelog.read(n)
771 repo.changelog.read(n)
767 #repo.changelog._cache = None
772 #repo.changelog._cache = None
768 timer(d)
773 timer(d)
769 fm.end()
774 fm.end()
770
775
771 @command('perfindex', formatteropts)
776 @command('perfindex', formatteropts)
772 def perfindex(ui, repo, **opts):
777 def perfindex(ui, repo, **opts):
773 import mercurial.revlog
778 import mercurial.revlog
774 timer, fm = gettimer(ui, opts)
779 timer, fm = gettimer(ui, opts)
775 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
780 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
776 n = repo["tip"].node()
781 n = repo["tip"].node()
777 svfs = getsvfs(repo)
782 svfs = getsvfs(repo)
778 def d():
783 def d():
779 cl = mercurial.revlog.revlog(svfs, "00changelog.i")
784 cl = mercurial.revlog.revlog(svfs, "00changelog.i")
780 cl.rev(n)
785 cl.rev(n)
781 timer(d)
786 timer(d)
782 fm.end()
787 fm.end()
783
788
784 @command('perfstartup', formatteropts)
789 @command('perfstartup', formatteropts)
785 def perfstartup(ui, repo, **opts):
790 def perfstartup(ui, repo, **opts):
786 timer, fm = gettimer(ui, opts)
791 timer, fm = gettimer(ui, opts)
787 cmd = sys.argv[0]
792 cmd = sys.argv[0]
788 def d():
793 def d():
789 if os.name != 'nt':
794 if os.name != 'nt':
790 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
795 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
791 else:
796 else:
792 os.environ['HGRCPATH'] = ' '
797 os.environ['HGRCPATH'] = ' '
793 os.system("%s version -q > NUL" % cmd)
798 os.system("%s version -q > NUL" % cmd)
794 timer(d)
799 timer(d)
795 fm.end()
800 fm.end()
796
801
797 @command('perfparents', formatteropts)
802 @command('perfparents', formatteropts)
798 def perfparents(ui, repo, **opts):
803 def perfparents(ui, repo, **opts):
799 timer, fm = gettimer(ui, opts)
804 timer, fm = gettimer(ui, opts)
800 # control the number of commits perfparents iterates over
805 # control the number of commits perfparents iterates over
801 # experimental config: perf.parentscount
806 # experimental config: perf.parentscount
802 count = getint(ui, "perf", "parentscount", 1000)
807 count = getint(ui, "perf", "parentscount", 1000)
803 if len(repo.changelog) < count:
808 if len(repo.changelog) < count:
804 raise error.Abort("repo needs %d commits for this test" % count)
809 raise error.Abort("repo needs %d commits for this test" % count)
805 repo = repo.unfiltered()
810 repo = repo.unfiltered()
806 nl = [repo.changelog.node(i) for i in xrange(count)]
811 nl = [repo.changelog.node(i) for i in xrange(count)]
807 def d():
812 def d():
808 for n in nl:
813 for n in nl:
809 repo.changelog.parents(n)
814 repo.changelog.parents(n)
810 timer(d)
815 timer(d)
811 fm.end()
816 fm.end()
812
817
813 @command('perfctxfiles', formatteropts)
818 @command('perfctxfiles', formatteropts)
814 def perfctxfiles(ui, repo, x, **opts):
819 def perfctxfiles(ui, repo, x, **opts):
815 x = int(x)
820 x = int(x)
816 timer, fm = gettimer(ui, opts)
821 timer, fm = gettimer(ui, opts)
817 def d():
822 def d():
818 len(repo[x].files())
823 len(repo[x].files())
819 timer(d)
824 timer(d)
820 fm.end()
825 fm.end()
821
826
822 @command('perfrawfiles', formatteropts)
827 @command('perfrawfiles', formatteropts)
823 def perfrawfiles(ui, repo, x, **opts):
828 def perfrawfiles(ui, repo, x, **opts):
824 x = int(x)
829 x = int(x)
825 timer, fm = gettimer(ui, opts)
830 timer, fm = gettimer(ui, opts)
826 cl = repo.changelog
831 cl = repo.changelog
827 def d():
832 def d():
828 len(cl.read(x)[3])
833 len(cl.read(x)[3])
829 timer(d)
834 timer(d)
830 fm.end()
835 fm.end()
831
836
832 @command('perflookup', formatteropts)
837 @command('perflookup', formatteropts)
833 def perflookup(ui, repo, rev, **opts):
838 def perflookup(ui, repo, rev, **opts):
834 timer, fm = gettimer(ui, opts)
839 timer, fm = gettimer(ui, opts)
835 timer(lambda: len(repo.lookup(rev)))
840 timer(lambda: len(repo.lookup(rev)))
836 fm.end()
841 fm.end()
837
842
838 @command('perfrevrange', formatteropts)
843 @command('perfrevrange', formatteropts)
839 def perfrevrange(ui, repo, *specs, **opts):
844 def perfrevrange(ui, repo, *specs, **opts):
840 timer, fm = gettimer(ui, opts)
845 timer, fm = gettimer(ui, opts)
841 revrange = scmutil.revrange
846 revrange = scmutil.revrange
842 timer(lambda: len(revrange(repo, specs)))
847 timer(lambda: len(revrange(repo, specs)))
843 fm.end()
848 fm.end()
844
849
845 @command('perfnodelookup', formatteropts)
850 @command('perfnodelookup', formatteropts)
846 def perfnodelookup(ui, repo, rev, **opts):
851 def perfnodelookup(ui, repo, rev, **opts):
847 timer, fm = gettimer(ui, opts)
852 timer, fm = gettimer(ui, opts)
848 import mercurial.revlog
853 import mercurial.revlog
849 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
854 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
850 n = repo[rev].node()
855 n = repo[rev].node()
851 cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i")
856 cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i")
852 def d():
857 def d():
853 cl.rev(n)
858 cl.rev(n)
854 clearcaches(cl)
859 clearcaches(cl)
855 timer(d)
860 timer(d)
856 fm.end()
861 fm.end()
857
862
858 @command('perflog',
863 @command('perflog',
859 [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
864 [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
860 def perflog(ui, repo, rev=None, **opts):
865 def perflog(ui, repo, rev=None, **opts):
861 if rev is None:
866 if rev is None:
862 rev=[]
867 rev=[]
863 timer, fm = gettimer(ui, opts)
868 timer, fm = gettimer(ui, opts)
864 ui.pushbuffer()
869 ui.pushbuffer()
865 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
870 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
866 copies=opts.get('rename')))
871 copies=opts.get('rename')))
867 ui.popbuffer()
872 ui.popbuffer()
868 fm.end()
873 fm.end()
869
874
870 @command('perfmoonwalk', formatteropts)
875 @command('perfmoonwalk', formatteropts)
871 def perfmoonwalk(ui, repo, **opts):
876 def perfmoonwalk(ui, repo, **opts):
872 """benchmark walking the changelog backwards
877 """benchmark walking the changelog backwards
873
878
874 This also loads the changelog data for each revision in the changelog.
879 This also loads the changelog data for each revision in the changelog.
875 """
880 """
876 timer, fm = gettimer(ui, opts)
881 timer, fm = gettimer(ui, opts)
877 def moonwalk():
882 def moonwalk():
878 for i in xrange(len(repo), -1, -1):
883 for i in xrange(len(repo), -1, -1):
879 ctx = repo[i]
884 ctx = repo[i]
880 ctx.branch() # read changelog data (in addition to the index)
885 ctx.branch() # read changelog data (in addition to the index)
881 timer(moonwalk)
886 timer(moonwalk)
882 fm.end()
887 fm.end()
883
888
884 @command('perftemplating', formatteropts)
889 @command('perftemplating', formatteropts)
885 def perftemplating(ui, repo, rev=None, **opts):
890 def perftemplating(ui, repo, rev=None, **opts):
886 if rev is None:
891 if rev is None:
887 rev=[]
892 rev=[]
888 timer, fm = gettimer(ui, opts)
893 timer, fm = gettimer(ui, opts)
889 ui.pushbuffer()
894 ui.pushbuffer()
890 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
895 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
891 template='{date|shortdate} [{rev}:{node|short}]'
896 template='{date|shortdate} [{rev}:{node|short}]'
892 ' {author|person}: {desc|firstline}\n'))
897 ' {author|person}: {desc|firstline}\n'))
893 ui.popbuffer()
898 ui.popbuffer()
894 fm.end()
899 fm.end()
895
900
896 @command('perfcca', formatteropts)
901 @command('perfcca', formatteropts)
897 def perfcca(ui, repo, **opts):
902 def perfcca(ui, repo, **opts):
898 timer, fm = gettimer(ui, opts)
903 timer, fm = gettimer(ui, opts)
899 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
904 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
900 fm.end()
905 fm.end()
901
906
902 @command('perffncacheload', formatteropts)
907 @command('perffncacheload', formatteropts)
903 def perffncacheload(ui, repo, **opts):
908 def perffncacheload(ui, repo, **opts):
904 timer, fm = gettimer(ui, opts)
909 timer, fm = gettimer(ui, opts)
905 s = repo.store
910 s = repo.store
906 def d():
911 def d():
907 s.fncache._load()
912 s.fncache._load()
908 timer(d)
913 timer(d)
909 fm.end()
914 fm.end()
910
915
911 @command('perffncachewrite', formatteropts)
916 @command('perffncachewrite', formatteropts)
912 def perffncachewrite(ui, repo, **opts):
917 def perffncachewrite(ui, repo, **opts):
913 timer, fm = gettimer(ui, opts)
918 timer, fm = gettimer(ui, opts)
914 s = repo.store
919 s = repo.store
915 s.fncache._load()
920 s.fncache._load()
916 lock = repo.lock()
921 lock = repo.lock()
917 tr = repo.transaction('perffncachewrite')
922 tr = repo.transaction('perffncachewrite')
918 def d():
923 def d():
919 s.fncache._dirty = True
924 s.fncache._dirty = True
920 s.fncache.write(tr)
925 s.fncache.write(tr)
921 timer(d)
926 timer(d)
922 tr.close()
927 tr.close()
923 lock.release()
928 lock.release()
924 fm.end()
929 fm.end()
925
930
@command('perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()

def _bdiffworker(q, ready, done):
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done() # for the None one
        with ready:
            ready.wait()

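# Worker protocol, as implemented above: perfbdiff feeds (text1, text2) pairs
# through q; a None sentinel marks the end of a batch, after which the worker
# parks on the 'ready' condition until the next timing run begins (or exits
# once 'done' is set).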
@command('perfbdiff', revlogopts + formatteropts + [
    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
    ('', 'alldata', False, 'test bdiffs for all associated revisions'),
    ('', 'threads', 0, 'number of threads to use (disable with 0)'),
    ],
    '-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between the requested revision and its
    delta parent.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    if opts['alldata']:
        opts['changelog'] = True

    if opts.get('changelog') or opts.get('manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('perfbdiff', 'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts['alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
            for pctx in ctx.parents():
                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        def d():
            for pair in textpairs:
                mdiff.textdiff(*pair)
    else:
        q = util.queue()
        for i in xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in xrange(threads):
            threading.Thread(target=_bdiffworker, args=(q, ready, done)).start()
        q.join()
        def d():
            for pair in textpairs:
                q.put(pair)
            for i in xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        done.set()
        for i in xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()

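# Illustrative invocations (assumptions, not from the original file):
#
#   $ hg perfbdiff -m 100                    # one manifest bdiff at rev 100
#   $ hg perfbdiff -c 100 --count 50         # 50 changelog bdiffs from rev 100
#   $ hg perfbdiff --alldata 100 --threads 4
#
# With --threads > 0 the pairs are diffed by the _bdiffworker pool above.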
@command('perfunidiff', revlogopts + formatteropts + [
    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
    ('', 'alldata', False, 'test unidiffs for all associated revisions'),
    ], '-c|-m|FILE REV')
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between the requested revision and its
    delta parent.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    if opts['alldata']:
        opts['changelog'] = True

    if opts.get('changelog') or opts.get('manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('perfunidiff', 'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, 'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts['alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
            for pctx in ctx.parents():
                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, '', right, '', 'left', 'right', binary=False)
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command('perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], '1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
        timer(d, title)
    fm.end()

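# For each run the timer title records the diff options exercised, so the five
# combinations above are reported as: 'diffopts: none', 'diffopts: -w',
# 'diffopts: -b', 'diffopts: -B' and 'diffopts: -wB'.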
@command('perfrevlogindex', revlogopts + formatteropts,
         '-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    rl = cmdutil.openrevlog(repo, 'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    header = struct.unpack('>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(('unsupported revlog version: %d') % version)

    rllen = len(rl)

    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, 'revlog constructor'),
        (read, 'read'),
        (parseindex, 'create index object'),
        (lambda: getentry(0), 'retrieve index entry for rev 0'),
        (lambda: resolvenode('a' * 20), 'look up missing node'),
        (lambda: resolvenode(node0), 'look up node at rev 0'),
        (lambda: resolvenode(node25), 'look up node at 1/4 len'),
        (lambda: resolvenode(node50), 'look up node at 1/2 len'),
        (lambda: resolvenode(node75), 'look up node at 3/4 len'),
        (lambda: resolvenode(node100), 'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

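# Illustrative usage (assumption, not from the original file):
#
#   $ hg perfrevlogindex -c           # changelog index
#   $ hg perfrevlogindex -m           # manifest index
#   $ hg perfrevlogindex README      # filelog index of a tracked file
#
# Each (fn, title) entry in 'benches' above is timed and reported separately.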
@command('perfrevlogrevisions', revlogopts + formatteropts +
         [('d', 'dist', 100, 'distance between the revisions'),
          ('s', 'startrev', 0, 'revision to start reading at'),
          ('', 'reverse', False, 'read in reverse')],
         '-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist``-th revision from revision 0 up
    to the tip of the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    rl = cmdutil.openrevlog(repo, 'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts['dist']

        if reverse:
            # Adjust for 0-based indexing so the reverse walk starts at the
            # last valid revision instead of one past it.
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

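# Illustrative usage (assumption): every 100th changelog revision (the
# default --dist), then every revision of one filelog in reverse:
#
#   $ hg perfrevlogrevisions -c
#   $ hg perfrevlogrevisions --dist 1 --reverse README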
@command('perfrevlogchunks', revlogopts + formatteropts +
         [('e', 'engines', '', 'compression engines to use'),
          ('s', 'startrev', 0, 'revision to start at')],
         '-c|-m|FILE')
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression
    performance. For measurements of higher-level operations like resolving
    revisions, see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort('unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress('dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), 'read'),
        (lambda: doreadcachedfh(), 'read w/ reused fd'),
        (lambda: doreadbatch(), 'read batch'),
        (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
        (lambda: dochunk(), 'chunk'),
        (lambda: dochunkbatch(), 'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append((functools.partial(docompress, compressor),
                        'compress w/ %s' % engine))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

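# Illustrative usage (assumption; the available engines depend on how
# Mercurial was built, e.g. zstd support is optional):
#
#   $ hg perfrevlogchunks -c --engines zlib
#   $ hg perfrevlogchunks -m --engines 'zlib,zstd' --startrev 1000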
@command('perfrevlogrevision', revlogopts + formatteropts +
         [('', 'cache', False, 'use caches instead of clearing')],
         '-c|-m|FILE REV')
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Obtain the raw chunks for that delta chain
    3. Decompress each raw chunk
    4. Apply binary patches to obtain fulltext
    5. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    if opts.get('changelog') or opts.get('manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('perfrevlogrevision', 'invalid arguments')

    r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer
        offset = start(chain[0])

        chunks = []
        ladd = chunks.append

        for rev in chain:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(buffer(data, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        segmentforrevs(chain[0], chain[-1])

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    chain = r._deltachain(rev)[0]
    data = segmentforrevs(chain[0], chain[-1])[1]
    rawchunks = getrawchunks(data, chain)
    bins = r._chunks(chain)
    text = str(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), 'full'),
        (lambda: dodeltachain(rev), 'deltachain'),
        (lambda: doread(chain), 'read'),
        (lambda: dorawchunks(data, chain), 'rawchunks'),
        (lambda: dodecompress(rawchunks), 'decompress'),
        (lambda: dopatch(text, bins), 'patch'),
        (lambda: dohash(text), 'hash'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

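# The bench titles above map onto the numbered steps in the docstring:
# 'deltachain' is step 1, 'read' step 2, 'rawchunks' the slicing of the read
# data, 'decompress' step 3, 'patch' step 4, 'hash' step 5, and 'full' times
# the whole r.revision() call. Illustrative usage (assumption):
#
#   $ hg perfrevlogrevision -m 10000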
@command('perfrevset',
         [('C', 'clear', False, 'clear volatile cache between each call.'),
          ('', 'contexts', False, 'obtain changectx for each revision')]
         + formatteropts, "REVSET")
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of the volatile
    revision set caches on revset execution. Volatile caches hold filtering
    and obsolescence related data."""
    timer, fm = gettimer(ui, opts)
    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass
    timer(d)
    fm.end()

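# Illustrative usage (assumption):
#
#   $ hg perfrevset 'all()'
#   $ hg perfrevset --clear --contexts 'draft() and head()'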
@command('perfvolatilesets',
         [('', 'clear-obsstore', False, 'drop obsstore between each call.'),
          ] + formatteropts)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence."""
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts['clear_obsstore']:
                clearfilecache(repo, 'obsstore')
            obsolete.getrevs(repo, name)
        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts['clear_obsstore']:
                clearfilecache(repo, 'obsstore')
            repoview.filterrevs(repo, name)
        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()

@command('perfbranchmap',
         [('f', 'full', False,
           'Includes build time of subset'),
          ('', 'clear-revbranch', False,
           'purge the revbranch cache between computation'),
          ] + formatteropts)
def perfbranchmap(ui, repo, full=False, clear_revbranch=False, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    timer, fm = gettimer(ui, opts)
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filters from smaller subsets to bigger subsets
    possiblefilters = set(repoview.filtertable)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, 'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    # add unfiltered
    allfilters.append(None)

    branchcacheread = safeattrsetter(branchmap, 'read')
    branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
    branchcacheread.set(lambda repo: None)
    branchcachewrite.set(lambda bc, repo: None)
    try:
        for name in allfilters:
            timer(getbranchmap(name), title=str(name))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()

@command('perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()

@command('perflrucachedict', formatteropts +
         [('', 'size', 4, 'size of cache'),
          ('', 'gets', 10000, 'number of key lookups'),
          ('', 'sets', 10000, 'number of key sets'),
          ('', 'mixed', 10000, 'number of mixed mode operations'),
          ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
         norepo=True)
def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
                 mixedgetfreq=50, **opts):
    def doinit():
        for i in xrange(10000):
            util.lrucachedict(size)

    values = []
    for i in xrange(size):
        values.append(random.randint(0, sys.maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    for i in xrange(sets):
        setseq.append(random.randint(0, sys.maxint))

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append((op, random.randint(0, size * 2)))

    def domixed():
        d = util.lrucachedict(size)

        for op, v in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    benches = [
        (doinit, 'init'),
        (dogets, 'gets'),
        (dosets, 'sets'),
        (domixed, 'mixed')
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

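# Illustrative usage (assumption; the command is registered with norepo=True,
# so it also runs outside a repository):
#
#   $ hg perflrucachedict --size 4 --gets 1000000 --mixedgetfreq 90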
@command('perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write"""
    timer, fm = gettimer(ui, opts)
    def write():
        for i in range(100000):
            ui.write(('Testing write performance\n'))
    timer(write)
    fm.end()

def uisetup(ui):
    if (util.safehasattr(cmdutil, 'openrevlog') and
        not util.safehasattr(commands, 'debugrevlogopts')):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, the '--dir' option for
        # openrevlog() should cause failure, because it has only been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
                raise error.Abort("This version doesn't support --dir option",
                                  hint="use 3.5 or later")
            return orig(repo, cmd, file_, opts)
        extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)

@@ -1,722 +1,718 @@
# extensions.py - extension handling for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools
import imp
import inspect
import os

from .i18n import (
    _,
    gettext,
)

from . import (
    cmdutil,
    configitems,
    error,
    pycompat,
    util,
)

_extensions = {}
_disabledextensions = {}
_aftercallbacks = {}
_order = []
_builtin = {
    'hbisect',
    'bookmarks',
    'color',
    'parentrevspec',
    'progress',
    'interhg',
    'inotify',
    'hgcia'
}

def extensions(ui=None):
    if ui:
        def enabled(name):
            for format in ['%s', 'hgext.%s']:
                conf = ui.config('extensions', format % name)
                if conf is not None and not conf.startswith('!'):
                    return True
    else:
        enabled = lambda name: True
    for name in _order:
        module = _extensions[name]
        if module and enabled(name):
            yield name, module

def find(name):
    '''return module with given extension name'''
    mod = None
    try:
        mod = _extensions[name]
    except KeyError:
        for k, v in _extensions.iteritems():
            if k.endswith('.' + name) or k.endswith('/' + name):
                mod = v
                break
    if not mod:
        raise KeyError(name)
    return mod

def loadpath(path, module_name):
    module_name = module_name.replace('.', '_')
    path = util.normpath(util.expandpath(path))
    module_name = pycompat.fsdecode(module_name)
    path = pycompat.fsdecode(path)
    if os.path.isdir(path):
        # module/__init__.py style
        d, f = os.path.split(path)
        fd, fpath, desc = imp.find_module(f, [d])
        return imp.load_module(module_name, fd, fpath, desc)
    else:
        try:
            return imp.load_source(module_name, path)
        except IOError as exc:
            if not exc.filename:
                exc.filename = path # python does not fill this
            raise

def _importh(name):
    """import and return the <name> module"""
    mod = __import__(pycompat.sysstr(name))
    components = name.split('.')
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod

def _importext(name, path=None, reportfunc=None):
    if path:
        # the module will be loaded in sys.modules
        # choose a unique name so that it doesn't
        # conflict with other modules
        mod = loadpath(path, 'hgext.%s' % name)
    else:
        try:
            mod = _importh("hgext.%s" % name)
        except ImportError as err:
            if reportfunc:
                reportfunc(err, "hgext.%s" % name, "hgext3rd.%s" % name)
            try:
                mod = _importh("hgext3rd.%s" % name)
            except ImportError as err:
                if reportfunc:
                    reportfunc(err, "hgext3rd.%s" % name, name)
                mod = _importh(name)
    return mod

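# Lookup order, as implemented above: for 'hg --config extensions.foo= ...'
# the module is tried as hgext.foo, then hgext3rd.foo, then plain foo; with a
# path ('extensions.foo=/path/to/foo.py') it is loaded directly under the
# synthetic name 'hgext.foo'.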
def _reportimporterror(ui, err, failed, next):
    # note: this ui.debug happens before --debug is processed; use
    # --config ui.debug=1 to see these messages.
    ui.debug('could not import %s (%s): trying %s\n'
             % (failed, util.forcebytestr(err), next))
    if ui.debugflag:
        ui.traceback()

# attributes set by registrar.command
_cmdfuncattrs = ('norepo', 'optionalrepo', 'inferrepo')

def _validatecmdtable(ui, cmdtable):
    """Check if extension commands have required attributes"""
    for c, e in cmdtable.iteritems():
        f = e[0]
        if getattr(f, '_deprecatedregistrar', False):
            ui.deprecwarn("cmdutil.command is deprecated, use "
                          "registrar.command to register '%s'" % c, '4.6')
        missing = [a for a in _cmdfuncattrs if not util.safehasattr(f, a)]
        if not missing:
            for option in e[1]:
                default = option[2]
                if isinstance(default, type(u'')):
                    raise error.ProgrammingError(
                        "option '%s.%s' has a unicode default value"
                        % (c, option[1]),
                        hint=("change the %s.%s default value to a "
                              "non-unicode string" % (c, option[1])))
            continue
        raise error.ProgrammingError(
            'missing attributes: %s' % ', '.join(missing),
            hint="use @command decorator to register '%s'" % c)

150 def load(ui, name, path):
150 def load(ui, name, path):
151 if name.startswith('hgext.') or name.startswith('hgext/'):
151 if name.startswith('hgext.') or name.startswith('hgext/'):
152 shortname = name[6:]
152 shortname = name[6:]
153 else:
153 else:
154 shortname = name
154 shortname = name
155 if shortname in _builtin:
155 if shortname in _builtin:
156 return None
156 return None
157 if shortname in _extensions:
157 if shortname in _extensions:
158 return _extensions[shortname]
158 return _extensions[shortname]
159 _extensions[shortname] = None
159 _extensions[shortname] = None
160 mod = _importext(name, path, bind(_reportimporterror, ui))
160 mod = _importext(name, path, bind(_reportimporterror, ui))
161
161
162 # Before we do anything with the extension, check against minimum stated
162 # Before we do anything with the extension, check against minimum stated
163 # compatibility. This gives extension authors a mechanism to have their
163 # compatibility. This gives extension authors a mechanism to have their
164 # extensions short-circuit when loaded with a known incompatible version
164 # extensions short-circuit when loaded with a known incompatible version
165 # of Mercurial.
165 # of Mercurial.
166 minver = getattr(mod, 'minimumhgversion', None)
166 minver = getattr(mod, 'minimumhgversion', None)
167 if minver and util.versiontuple(minver, 2) > util.versiontuple(n=2):
167 if minver and util.versiontuple(minver, 2) > util.versiontuple(n=2):
168 ui.warn(_('(third party extension %s requires version %s or newer '
168 ui.warn(_('(third party extension %s requires version %s or newer '
169 'of Mercurial; disabling)\n') % (shortname, minver))
169 'of Mercurial; disabling)\n') % (shortname, minver))
170 return
170 return
171 _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
171 _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
172
172
173 _extensions[shortname] = mod
173 _extensions[shortname] = mod
174 _order.append(shortname)
174 _order.append(shortname)
175 for fn in _aftercallbacks.get(shortname, []):
175 for fn in _aftercallbacks.get(shortname, []):
176 fn(loaded=True)
176 fn(loaded=True)
177 return mod
177 return mod
178
178
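For context, this is the hook the check above gives extension authors: a
module-level declaration in the extension itself. The version strings are
made up for illustration:

    # at the top level of a third-party extension
    minimumhgversion = '4.4'  # load() disables the extension on older hg
    testedwith = '4.4 4.5'

Note that util.versiontuple(minver, 2) compares only (major, minor), so a
declared minimum of '4.4.2' behaves the same as '4.4' in this check.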
179 def _runuisetup(name, ui):
179 def _runuisetup(name, ui):
180 uisetup = getattr(_extensions[name], 'uisetup', None)
180 uisetup = getattr(_extensions[name], 'uisetup', None)
181 if uisetup:
181 if uisetup:
182 try:
182 try:
183 uisetup(ui)
183 uisetup(ui)
184 except Exception as inst:
184 except Exception as inst:
185 ui.traceback(force=True)
185 ui.traceback(force=True)
186 msg = util.forcebytestr(inst)
186 msg = util.forcebytestr(inst)
187 ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
187 ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
188 return False
188 return False
189 return True
189 return True
190
190
191 def _runextsetup(name, ui):
191 def _runextsetup(name, ui):
192 extsetup = getattr(_extensions[name], 'extsetup', None)
192 extsetup = getattr(_extensions[name], 'extsetup', None)
193 if extsetup:
193 if extsetup:
194 try:
194 try:
195 try:
195 try:
196 extsetup(ui)
196 extsetup(ui)
197 except TypeError:
197 except TypeError:
198 # Try to use getfullargspec (Python 3) first, and fall
198 if pycompat.getargspec(extsetup).args:
199 # back to getargspec only if it doesn't exist so as to
200 # avoid warnings.
201 if getattr(inspect, 'getfullargspec',
202 getattr(inspect, 'getargspec'))(extsetup).args:
203 raise
199 raise
204 extsetup() # old extsetup with no ui argument
200 extsetup() # old extsetup with no ui argument
205 except Exception as inst:
201 except Exception as inst:
206 ui.traceback(force=True)
202 ui.traceback(force=True)
207 msg = util.forcebytestr(inst)
203 msg = util.forcebytestr(inst)
208 ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
204 ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
209 return False
205 return False
210 return True
206 return True
211
207
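For context, a plausible sketch of the pycompat.getargspec helper the new
code above calls; its real definition lives in mercurial/pycompat.py,
outside this hunk, so treat the shape below as an assumption:

    import inspect
    import sys

    if sys.version_info[0] >= 3:
        # getfullargspec avoids the DeprecationWarning that
        # inspect.getargspec emits on Python 3
        getargspec = inspect.getfullargspec
    else:
        getargspec = inspect.getargspec

Both spellings return an object with an .args attribute, which is all
_runextsetup needs in order to decide whether extsetup() takes a ui
argument.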
212 def loadall(ui, whitelist=None):
208 def loadall(ui, whitelist=None):
213 result = ui.configitems("extensions")
209 result = ui.configitems("extensions")
214 if whitelist is not None:
210 if whitelist is not None:
215 result = [(k, v) for (k, v) in result if k in whitelist]
211 result = [(k, v) for (k, v) in result if k in whitelist]
216 newindex = len(_order)
212 newindex = len(_order)
217 for (name, path) in result:
213 for (name, path) in result:
218 if path:
214 if path:
219 if path[0:1] == '!':
215 if path[0:1] == '!':
220 _disabledextensions[name] = path[1:]
216 _disabledextensions[name] = path[1:]
221 continue
217 continue
222 try:
218 try:
223 load(ui, name, path)
219 load(ui, name, path)
224 except Exception as inst:
220 except Exception as inst:
225 msg = util.forcebytestr(inst)
221 msg = util.forcebytestr(inst)
226 if path:
222 if path:
227 ui.warn(_("*** failed to import extension %s from %s: %s\n")
223 ui.warn(_("*** failed to import extension %s from %s: %s\n")
228 % (name, path, msg))
224 % (name, path, msg))
229 else:
225 else:
230 ui.warn(_("*** failed to import extension %s: %s\n")
226 ui.warn(_("*** failed to import extension %s: %s\n")
231 % (name, msg))
227 % (name, msg))
232 if isinstance(inst, error.Hint) and inst.hint:
228 if isinstance(inst, error.Hint) and inst.hint:
233 ui.warn(_("*** (%s)\n") % inst.hint)
229 ui.warn(_("*** (%s)\n") % inst.hint)
234 ui.traceback()
230 ui.traceback()
235 # list of (objname, loadermod, loadername) tuple:
231 # list of (objname, loadermod, loadername) tuple:
236 # - objname is the name of an object in extension module,
232 # - objname is the name of an object in extension module,
237 # from which extra information is loaded
233 # from which extra information is loaded
238 # - loadermod is the module where loader is placed
234 # - loadermod is the module where loader is placed
239 # - loadername is the name of the function,
235 # - loadername is the name of the function,
240 # which takes (ui, extensionname, extraobj) arguments
236 # which takes (ui, extensionname, extraobj) arguments
241 #
237 #
242 # This one is for the list of items that must be run before running any setup
238 # This one is for the list of items that must be run before running any setup
243 earlyextraloaders = [
239 earlyextraloaders = [
244 ('configtable', configitems, 'loadconfigtable'),
240 ('configtable', configitems, 'loadconfigtable'),
245 ]
241 ]
246 _loadextra(ui, newindex, earlyextraloaders)
242 _loadextra(ui, newindex, earlyextraloaders)
247
243
248 broken = set()
244 broken = set()
249 for name in _order[newindex:]:
245 for name in _order[newindex:]:
250 if not _runuisetup(name, ui):
246 if not _runuisetup(name, ui):
251 broken.add(name)
247 broken.add(name)
252
248
253 for name in _order[newindex:]:
249 for name in _order[newindex:]:
254 if name in broken:
250 if name in broken:
255 continue
251 continue
256 if not _runextsetup(name, ui):
252 if not _runextsetup(name, ui):
257 broken.add(name)
253 broken.add(name)
258
254
259 for name in broken:
255 for name in broken:
260 _extensions[name] = None
256 _extensions[name] = None
261
257
262 # Call aftercallbacks that were never met.
258 # Call aftercallbacks that were never met.
263 for shortname in _aftercallbacks:
259 for shortname in _aftercallbacks:
264 if shortname in _extensions:
260 if shortname in _extensions:
265 continue
261 continue
266
262
267 for fn in _aftercallbacks[shortname]:
263 for fn in _aftercallbacks[shortname]:
268 fn(loaded=False)
264 fn(loaded=False)
269
265
270 # loadall() is called multiple times and lingering _aftercallbacks
266 # loadall() is called multiple times and lingering _aftercallbacks
271 # entries could result in double execution. See issue4646.
267 # entries could result in double execution. See issue4646.
272 _aftercallbacks.clear()
268 _aftercallbacks.clear()
273
269
274 # delay importing avoids cyclic dependency (especially commands)
270 # delay importing avoids cyclic dependency (especially commands)
275 from . import (
271 from . import (
276 color,
272 color,
277 commands,
273 commands,
278 filemerge,
274 filemerge,
279 fileset,
275 fileset,
280 revset,
276 revset,
281 templatefilters,
277 templatefilters,
282 templatekw,
278 templatekw,
283 templater,
279 templater,
284 )
280 )
285
281
286 # list of (objname, loadermod, loadername) tuple:
282 # list of (objname, loadermod, loadername) tuple:
287 # - objname is the name of an object in extension module,
283 # - objname is the name of an object in extension module,
288 # from which extra information is loaded
284 # from which extra information is loaded
289 # - loadermod is the module where loader is placed
285 # - loadermod is the module where loader is placed
290 # - loadername is the name of the function,
286 # - loadername is the name of the function,
291 # which takes (ui, extensionname, extraobj) arguments
287 # which takes (ui, extensionname, extraobj) arguments
292 extraloaders = [
288 extraloaders = [
293 ('cmdtable', commands, 'loadcmdtable'),
289 ('cmdtable', commands, 'loadcmdtable'),
294 ('colortable', color, 'loadcolortable'),
290 ('colortable', color, 'loadcolortable'),
295 ('filesetpredicate', fileset, 'loadpredicate'),
291 ('filesetpredicate', fileset, 'loadpredicate'),
296 ('internalmerge', filemerge, 'loadinternalmerge'),
292 ('internalmerge', filemerge, 'loadinternalmerge'),
297 ('revsetpredicate', revset, 'loadpredicate'),
293 ('revsetpredicate', revset, 'loadpredicate'),
298 ('templatefilter', templatefilters, 'loadfilter'),
294 ('templatefilter', templatefilters, 'loadfilter'),
299 ('templatefunc', templater, 'loadfunction'),
295 ('templatefunc', templater, 'loadfunction'),
300 ('templatekeyword', templatekw, 'loadkeyword'),
296 ('templatekeyword', templatekw, 'loadkeyword'),
301 ]
297 ]
302 _loadextra(ui, newindex, extraloaders)
298 _loadextra(ui, newindex, extraloaders)
303
299
304 def _loadextra(ui, newindex, extraloaders):
300 def _loadextra(ui, newindex, extraloaders):
305 for name in _order[newindex:]:
301 for name in _order[newindex:]:
306 module = _extensions[name]
302 module = _extensions[name]
307 if not module:
303 if not module:
308 continue # loading this module failed
304 continue # loading this module failed
309
305
310 for objname, loadermod, loadername in extraloaders:
306 for objname, loadermod, loadername in extraloaders:
311 extraobj = getattr(module, objname, None)
307 extraobj = getattr(module, objname, None)
312 if extraobj is not None:
308 if extraobj is not None:
313 getattr(loadermod, loadername)(ui, name, extraobj)
309 getattr(loadermod, loadername)(ui, name, extraobj)
314
310
315 def afterloaded(extension, callback):
311 def afterloaded(extension, callback):
316 '''Run the specified function after a named extension is loaded.
312 '''Run the specified function after a named extension is loaded.
317
313
318 If the named extension is already loaded, the callback will be called
314 If the named extension is already loaded, the callback will be called
319 immediately.
315 immediately.
320
316
321 If the named extension never loads, the callback will be called after
317 If the named extension never loads, the callback will be called after
322 all extensions have been loaded.
318 all extensions have been loaded.
323
319
324 The callback receives the named argument ``loaded``, which is a boolean
320 The callback receives the named argument ``loaded``, which is a boolean
325 indicating whether the dependent extension actually loaded.
321 indicating whether the dependent extension actually loaded.
326 '''
322 '''
327
323
328 if extension in _extensions:
324 if extension in _extensions:
329 # Report loaded as False if the extension is disabled
325 # Report loaded as False if the extension is disabled
330 loaded = (_extensions[extension] is not None)
326 loaded = (_extensions[extension] is not None)
331 callback(loaded=loaded)
327 callback(loaded=loaded)
332 else:
328 else:
333 _aftercallbacks.setdefault(extension, []).append(callback)
329 _aftercallbacks.setdefault(extension, []).append(callback)
334
330
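A minimal usage sketch for afterloaded(); the callback name is
illustrative:

    def _rebaseloaded(loaded=False):
        if not loaded:
            return  # the rebase extension is absent or disabled
        # safe to touch the rebase extension's attributes from here

    extensions.afterloaded('rebase', _rebaseloaded)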
335 def bind(func, *args):
331 def bind(func, *args):
336 '''Partial function application
332 '''Partial function application
337
333
338 Returns a new function that is the partial application of args
334 Returns a new function that is the partial application of args
339 to func. For example,
335 to func. For example,
340
336
341 f(1, 2, bar=3) === bind(f, 1)(2, bar=3)'''
337 f(1, 2, bar=3) === bind(f, 1)(2, bar=3)'''
342 assert callable(func)
338 assert callable(func)
343 def closure(*a, **kw):
339 def closure(*a, **kw):
344 return func(*(args + a), **kw)
340 return func(*(args + a), **kw)
345 return closure
341 return closure
346
342
347 def _updatewrapper(wrap, origfn, unboundwrapper):
343 def _updatewrapper(wrap, origfn, unboundwrapper):
348 '''Copy and add some useful attributes to wrapper'''
344 '''Copy and add some useful attributes to wrapper'''
349 try:
345 try:
350 wrap.__name__ = origfn.__name__
346 wrap.__name__ = origfn.__name__
351 except AttributeError:
347 except AttributeError:
352 pass
348 pass
353 wrap.__module__ = getattr(origfn, '__module__')
349 wrap.__module__ = getattr(origfn, '__module__')
354 wrap.__doc__ = getattr(origfn, '__doc__')
350 wrap.__doc__ = getattr(origfn, '__doc__')
355 wrap.__dict__.update(getattr(origfn, '__dict__', {}))
351 wrap.__dict__.update(getattr(origfn, '__dict__', {}))
356 wrap._origfunc = origfn
352 wrap._origfunc = origfn
357 wrap._unboundwrapper = unboundwrapper
353 wrap._unboundwrapper = unboundwrapper
358
354
359 def wrapcommand(table, command, wrapper, synopsis=None, docstring=None):
355 def wrapcommand(table, command, wrapper, synopsis=None, docstring=None):
360 '''Wrap the command named `command' in table
356 '''Wrap the command named `command' in table
361
357
362 Replace command in the command table with wrapper. The wrapped command will
358 Replace command in the command table with wrapper. The wrapped command will
363 be inserted into the command table specified by the table argument.
359 be inserted into the command table specified by the table argument.
364
360
365 The wrapper will be called like
361 The wrapper will be called like
366
362
367 wrapper(orig, *args, **kwargs)
363 wrapper(orig, *args, **kwargs)
368
364
369 where orig is the original (wrapped) function, and *args, **kwargs
365 where orig is the original (wrapped) function, and *args, **kwargs
370 are the arguments passed to it.
366 are the arguments passed to it.
371
367
372 Optionally append to the command synopsis and docstring, used for help.
368 Optionally append to the command synopsis and docstring, used for help.
373 For example, if your extension wraps the ``bookmarks`` command to add the
369 For example, if your extension wraps the ``bookmarks`` command to add the
374 flags ``--remote`` and ``--all`` you might call this function like so:
370 flags ``--remote`` and ``--all`` you might call this function like so:
375
371
376 synopsis = ' [-a] [--remote]'
372 synopsis = ' [-a] [--remote]'
377 docstring = """
373 docstring = """
378
374
379 The ``remotenames`` extension adds the ``--remote`` and ``--all`` (``-a``)
375 The ``remotenames`` extension adds the ``--remote`` and ``--all`` (``-a``)
380 flags to the bookmarks command. Either flag will show the remote bookmarks
376 flags to the bookmarks command. Either flag will show the remote bookmarks
381 known to the repository; ``--remote`` will also suppress the output of the
377 known to the repository; ``--remote`` will also suppress the output of the
382 local bookmarks.
378 local bookmarks.
383 """
379 """
384
380
385 extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks,
381 extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks,
386 synopsis, docstring)
382 synopsis, docstring)
387 '''
383 '''
388 assert callable(wrapper)
384 assert callable(wrapper)
389 aliases, entry = cmdutil.findcmd(command, table)
385 aliases, entry = cmdutil.findcmd(command, table)
390 for alias, e in table.iteritems():
386 for alias, e in table.iteritems():
391 if e is entry:
387 if e is entry:
392 key = alias
388 key = alias
393 break
389 break
394
390
395 origfn = entry[0]
391 origfn = entry[0]
396 wrap = functools.partial(util.checksignature(wrapper),
392 wrap = functools.partial(util.checksignature(wrapper),
397 util.checksignature(origfn))
393 util.checksignature(origfn))
398 _updatewrapper(wrap, origfn, wrapper)
394 _updatewrapper(wrap, origfn, wrapper)
399 if docstring is not None:
395 if docstring is not None:
400 wrap.__doc__ += docstring
396 wrap.__doc__ += docstring
401
397
402 newentry = list(entry)
398 newentry = list(entry)
403 newentry[0] = wrap
399 newentry[0] = wrap
404 if synopsis is not None:
400 if synopsis is not None:
405 newentry[2] += synopsis
401 newentry[2] += synopsis
406 table[key] = tuple(newentry)
402 table[key] = tuple(newentry)
407 return entry
403 return entry
408
404
409 def wrapfilecache(cls, propname, wrapper):
405 def wrapfilecache(cls, propname, wrapper):
410 """Wraps a filecache property.
406 """Wraps a filecache property.
411
407
412 These can't be wrapped using the normal wrapfunction.
408 These can't be wrapped using the normal wrapfunction.
413 """
409 """
414 propname = pycompat.sysstr(propname)
410 propname = pycompat.sysstr(propname)
415 assert callable(wrapper)
411 assert callable(wrapper)
416 for currcls in cls.__mro__:
412 for currcls in cls.__mro__:
417 if propname in currcls.__dict__:
413 if propname in currcls.__dict__:
418 origfn = currcls.__dict__[propname].func
414 origfn = currcls.__dict__[propname].func
419 assert callable(origfn)
415 assert callable(origfn)
420 def wrap(*args, **kwargs):
416 def wrap(*args, **kwargs):
421 return wrapper(origfn, *args, **kwargs)
417 return wrapper(origfn, *args, **kwargs)
422 currcls.__dict__[propname].func = wrap
418 currcls.__dict__[propname].func = wrap
423 break
419 break
424
420
425 if currcls is object:
421 if currcls is object:
426 raise AttributeError(r"type '%s' has no property '%s'" % (
422 raise AttributeError(r"type '%s' has no property '%s'" % (
427 cls, propname))
423 cls, propname))
428
424
429 class wrappedfunction(object):
425 class wrappedfunction(object):
430 '''context manager for temporarily wrapping a function'''
426 '''context manager for temporarily wrapping a function'''
431
427
432 def __init__(self, container, funcname, wrapper):
428 def __init__(self, container, funcname, wrapper):
433 assert callable(wrapper)
429 assert callable(wrapper)
434 self._container = container
430 self._container = container
435 self._funcname = funcname
431 self._funcname = funcname
436 self._wrapper = wrapper
432 self._wrapper = wrapper
437
433
438 def __enter__(self):
434 def __enter__(self):
439 wrapfunction(self._container, self._funcname, self._wrapper)
435 wrapfunction(self._container, self._funcname, self._wrapper)
440
436
441 def __exit__(self, exctype, excvalue, traceback):
437 def __exit__(self, exctype, excvalue, traceback):
442 unwrapfunction(self._container, self._funcname, self._wrapper)
438 unwrapfunction(self._container, self._funcname, self._wrapper)
443
439
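A caller-side sketch for the context manager above; the wrapper name is
illustrative:

    def countingcopy(orig, *args, **kwargs):
        # observe or adjust the call, then delegate to the original
        return orig(*args, **kwargs)

    with extensions.wrappedfunction(util, 'copyfile', countingcopy):
        pass  # util.copyfile is wrapped only inside this block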
444 def wrapfunction(container, funcname, wrapper):
440 def wrapfunction(container, funcname, wrapper):
445 '''Wrap the function named funcname in container
441 '''Wrap the function named funcname in container
446
442
447 Replace the funcname member in the given container with the specified
443 Replace the funcname member in the given container with the specified
448 wrapper. The container is typically a module, class, or instance.
444 wrapper. The container is typically a module, class, or instance.
449
445
450 The wrapper will be called like
446 The wrapper will be called like
451
447
452 wrapper(orig, *args, **kwargs)
448 wrapper(orig, *args, **kwargs)
453
449
454 where orig is the original (wrapped) function, and *args, **kwargs
450 where orig is the original (wrapped) function, and *args, **kwargs
455 are the arguments passed to it.
451 are the arguments passed to it.
456
452
457 Wrapping methods of the repository object is not recommended since
453 Wrapping methods of the repository object is not recommended since
458 it conflicts with extensions that extend the repository by
454 it conflicts with extensions that extend the repository by
459 subclassing. All extensions that need to extend methods of
455 subclassing. All extensions that need to extend methods of
460 localrepository should use this subclassing trick: namely,
456 localrepository should use this subclassing trick: namely,
461 reposetup() should look like
457 reposetup() should look like
462
458
463 def reposetup(ui, repo):
459 def reposetup(ui, repo):
464 class myrepo(repo.__class__):
460 class myrepo(repo.__class__):
465 def whatever(self, *args, **kwargs):
461 def whatever(self, *args, **kwargs):
466 [...extension stuff...]
462 [...extension stuff...]
467 super(myrepo, self).whatever(*args, **kwargs)
463 super(myrepo, self).whatever(*args, **kwargs)
468 [...extension stuff...]
464 [...extension stuff...]
469
465
470 repo.__class__ = myrepo
466 repo.__class__ = myrepo
471
467
472 In general, combining wrapfunction() with subclassing does not
468 In general, combining wrapfunction() with subclassing does not
473 work. Since you cannot control what other extensions are loaded by
469 work. Since you cannot control what other extensions are loaded by
474 your end users, you should play nicely with others by using the
470 your end users, you should play nicely with others by using the
475 subclass trick.
471 subclass trick.
476 '''
472 '''
477 assert callable(wrapper)
473 assert callable(wrapper)
478
474
479 origfn = getattr(container, funcname)
475 origfn = getattr(container, funcname)
480 assert callable(origfn)
476 assert callable(origfn)
481 if inspect.ismodule(container):
477 if inspect.ismodule(container):
482 # origfn is not an instance or class method. "partial" can be used.
478 # origfn is not an instance or class method. "partial" can be used.
483 # "partial" won't insert a frame in traceback.
479 # "partial" won't insert a frame in traceback.
484 wrap = functools.partial(wrapper, origfn)
480 wrap = functools.partial(wrapper, origfn)
485 else:
481 else:
486 # "partial" cannot be safely used. Emulate its effect by using "bind".
482 # "partial" cannot be safely used. Emulate its effect by using "bind".
487 # The downside is one more frame in traceback.
483 # The downside is one more frame in traceback.
488 wrap = bind(wrapper, origfn)
484 wrap = bind(wrapper, origfn)
489 _updatewrapper(wrap, origfn, wrapper)
485 _updatewrapper(wrap, origfn, wrapper)
490 setattr(container, funcname, wrap)
486 setattr(container, funcname, wrap)
491 return origfn
487 return origfn
492
488
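A self-contained, caller-side sketch of the mechanics (all names are
illustrative):

    class container(object):
        def greet(self):
            return 'hello'

    def upperwrapper(orig, self):
        # orig is the unwrapped container.greet
        return orig(self).upper()

    extensions.wrapfunction(container, 'greet', upperwrapper)
    assert container().greet() == 'HELLO'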
493 def unwrapfunction(container, funcname, wrapper=None):
489 def unwrapfunction(container, funcname, wrapper=None):
494 '''undo wrapfunction
490 '''undo wrapfunction
495
491
496 If wrapper is None, undo the last wrap. Otherwise, remove the wrapper
492 If wrapper is None, undo the last wrap. Otherwise, remove the wrapper
497 from the chain of wrappers.
493 from the chain of wrappers.
498
494
499 Return the removed wrapper.
495 Return the removed wrapper.
500 Raise IndexError if wrapper is None and there is nothing to unwrap;
496 Raise IndexError if wrapper is None and there is nothing to unwrap;
501 raise ValueError if wrapper is not None but is not found in the wrapper chain.
497 raise ValueError if wrapper is not None but is not found in the wrapper chain.
502 '''
498 '''
503 chain = getwrapperchain(container, funcname)
499 chain = getwrapperchain(container, funcname)
504 origfn = chain.pop()
500 origfn = chain.pop()
505 if wrapper is None:
501 if wrapper is None:
506 wrapper = chain[0]
502 wrapper = chain[0]
507 chain.remove(wrapper)
503 chain.remove(wrapper)
508 setattr(container, funcname, origfn)
504 setattr(container, funcname, origfn)
509 for w in reversed(chain):
505 for w in reversed(chain):
510 wrapfunction(container, funcname, w)
506 wrapfunction(container, funcname, w)
511 return wrapper
507 return wrapper
512
508
513 def getwrapperchain(container, funcname):
509 def getwrapperchain(container, funcname):
514 '''get a chain of wrappers of a function
510 '''get a chain of wrappers of a function
515
511
516 Return a list of functions: [newest wrapper, ..., oldest wrapper, origfunc]
512 Return a list of functions: [newest wrapper, ..., oldest wrapper, origfunc]
517
513
518 The wrapper functions are the ones passed to wrapfunction, whose first
514 The wrapper functions are the ones passed to wrapfunction, whose first
519 argument is origfunc.
515 argument is origfunc.
520 '''
516 '''
521 result = []
517 result = []
522 fn = getattr(container, funcname)
518 fn = getattr(container, funcname)
523 while fn:
519 while fn:
524 assert callable(fn)
520 assert callable(fn)
525 result.append(getattr(fn, '_unboundwrapper', fn))
521 result.append(getattr(fn, '_unboundwrapper', fn))
526 fn = getattr(fn, '_origfunc', None)
522 fn = getattr(fn, '_origfunc', None)
527 return result
523 return result
528
524
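A self-contained sketch of the chain bookkeeping; the module and function
names are made up:

    import types
    mod = types.ModuleType('demo')
    mod.f = lambda: 'orig'

    def w1(orig):
        return 'w1(%s)' % orig()

    def w2(orig):
        return 'w2(%s)' % orig()

    extensions.wrapfunction(mod, 'f', w1)
    extensions.wrapfunction(mod, 'f', w2)
    # getwrapperchain(mod, 'f') is now [w2, w1, <original lambda>]
    extensions.unwrapfunction(mod, 'f', w1)  # remove w1 from the middle
    assert mod.f() == 'w2(orig)'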
529 def _disabledpaths(strip_init=False):
525 def _disabledpaths(strip_init=False):
530 '''find paths of disabled extensions. returns a dict of {name: path};
526 '''find paths of disabled extensions. returns a dict of {name: path};
531 removes /__init__.py from packages if strip_init is True'''
527 removes /__init__.py from packages if strip_init is True'''
532 import hgext
528 import hgext
533 extpath = os.path.dirname(
529 extpath = os.path.dirname(
534 os.path.abspath(pycompat.fsencode(hgext.__file__)))
530 os.path.abspath(pycompat.fsencode(hgext.__file__)))
535 try: # might not be a filesystem path
531 try: # might not be a filesystem path
536 files = os.listdir(extpath)
532 files = os.listdir(extpath)
537 except OSError:
533 except OSError:
538 return {}
534 return {}
539
535
540 exts = {}
536 exts = {}
541 for e in files:
537 for e in files:
542 if e.endswith('.py'):
538 if e.endswith('.py'):
543 name = e.rsplit('.', 1)[0]
539 name = e.rsplit('.', 1)[0]
544 path = os.path.join(extpath, e)
540 path = os.path.join(extpath, e)
545 else:
541 else:
546 name = e
542 name = e
547 path = os.path.join(extpath, e, '__init__.py')
543 path = os.path.join(extpath, e, '__init__.py')
548 if not os.path.exists(path):
544 if not os.path.exists(path):
549 continue
545 continue
550 if strip_init:
546 if strip_init:
551 path = os.path.dirname(path)
547 path = os.path.dirname(path)
552 if name in exts or name in _order or name == '__init__':
548 if name in exts or name in _order or name == '__init__':
553 continue
549 continue
554 exts[name] = path
550 exts[name] = path
555 for name, path in _disabledextensions.iteritems():
551 for name, path in _disabledextensions.iteritems():
556 # If no path was provided for a disabled extension (e.g. "color=!"),
552 # If no path was provided for a disabled extension (e.g. "color=!"),
557 # don't replace the path we already found by the scan above.
553 # don't replace the path we already found by the scan above.
558 if path:
554 if path:
559 exts[name] = path
555 exts[name] = path
560 return exts
556 return exts
561
557
562 def _moduledoc(file):
558 def _moduledoc(file):
563 '''return the top-level python documentation for the given file
559 '''return the top-level python documentation for the given file
564
560
565 Loosely inspired by pydoc.source_synopsis(), but rewritten to
561 Loosely inspired by pydoc.source_synopsis(), but rewritten to
566 handle triple quotes and to return the whole text instead of just
562 handle triple quotes and to return the whole text instead of just
567 the synopsis'''
563 the synopsis'''
568 result = []
564 result = []
569
565
570 line = file.readline()
566 line = file.readline()
571 while line[:1] == '#' or not line.strip():
567 while line[:1] == '#' or not line.strip():
572 line = file.readline()
568 line = file.readline()
573 if not line:
569 if not line:
574 break
570 break
575
571
576 start = line[:3]
572 start = line[:3]
577 if start == '"""' or start == "'''":
573 if start == '"""' or start == "'''":
578 line = line[3:]
574 line = line[3:]
579 while line:
575 while line:
580 if line.rstrip().endswith(start):
576 if line.rstrip().endswith(start):
581 line = line.split(start)[0]
577 line = line.split(start)[0]
582 if line:
578 if line:
583 result.append(line)
579 result.append(line)
584 break
580 break
585 elif not line:
581 elif not line:
586 return None # unmatched delimiter
582 return None # unmatched delimiter
587 result.append(line)
583 result.append(line)
588 line = file.readline()
584 line = file.readline()
589 else:
585 else:
590 return None
586 return None
591
587
592 return ''.join(result)
588 return ''.join(result)
593
589
594 def _disabledhelp(path):
590 def _disabledhelp(path):
595 '''retrieve help synopsis of a disabled extension (without importing)'''
591 '''retrieve help synopsis of a disabled extension (without importing)'''
596 try:
592 try:
597 file = open(path)
593 file = open(path)
598 except IOError:
594 except IOError:
599 return
595 return
600 else:
596 else:
601 doc = _moduledoc(file)
597 doc = _moduledoc(file)
602 file.close()
598 file.close()
603
599
604 if doc: # extracting localized synopsis
600 if doc: # extracting localized synopsis
605 return gettext(doc)
601 return gettext(doc)
606 else:
602 else:
607 return _('(no help text available)')
603 return _('(no help text available)')
608
604
609 def disabled():
605 def disabled():
610 '''find disabled extensions from hgext. returns a dict of {name: desc}'''
606 '''find disabled extensions from hgext. returns a dict of {name: desc}'''
611 try:
607 try:
612 from hgext import __index__
608 from hgext import __index__
613 return dict((name, gettext(desc))
609 return dict((name, gettext(desc))
614 for name, desc in __index__.docs.iteritems()
610 for name, desc in __index__.docs.iteritems()
615 if name not in _order)
611 if name not in _order)
616 except (ImportError, AttributeError):
612 except (ImportError, AttributeError):
617 pass
613 pass
618
614
619 paths = _disabledpaths()
615 paths = _disabledpaths()
620 if not paths:
616 if not paths:
621 return {}
617 return {}
622
618
623 exts = {}
619 exts = {}
624 for name, path in paths.iteritems():
620 for name, path in paths.iteritems():
625 doc = _disabledhelp(path)
621 doc = _disabledhelp(path)
626 if doc:
622 if doc:
627 exts[name] = doc.splitlines()[0]
623 exts[name] = doc.splitlines()[0]
628
624
629 return exts
625 return exts
630
626
631 def disabledext(name):
627 def disabledext(name):
632 '''find a specific disabled extension from hgext. returns desc'''
628 '''find a specific disabled extension from hgext. returns desc'''
633 try:
629 try:
634 from hgext import __index__
630 from hgext import __index__
635 if name in _order: # enabled
631 if name in _order: # enabled
636 return
632 return
637 else:
633 else:
638 return gettext(__index__.docs.get(name))
634 return gettext(__index__.docs.get(name))
639 except (ImportError, AttributeError):
635 except (ImportError, AttributeError):
640 pass
636 pass
641
637
642 paths = _disabledpaths()
638 paths = _disabledpaths()
643 if name in paths:
639 if name in paths:
644 return _disabledhelp(paths[name])
640 return _disabledhelp(paths[name])
645
641
646 def disabledcmd(ui, cmd, strict=False):
642 def disabledcmd(ui, cmd, strict=False):
647 '''import disabled extensions until cmd is found.
643 '''import disabled extensions until cmd is found.
648 returns (cmdname, extname, module)'''
644 returns (cmdname, extname, module)'''
649
645
650 paths = _disabledpaths(strip_init=True)
646 paths = _disabledpaths(strip_init=True)
651 if not paths:
647 if not paths:
652 raise error.UnknownCommand(cmd)
648 raise error.UnknownCommand(cmd)
653
649
654 def findcmd(cmd, name, path):
650 def findcmd(cmd, name, path):
655 try:
651 try:
656 mod = loadpath(path, 'hgext.%s' % name)
652 mod = loadpath(path, 'hgext.%s' % name)
657 except Exception:
653 except Exception:
658 return
654 return
659 try:
655 try:
660 aliases, entry = cmdutil.findcmd(cmd,
656 aliases, entry = cmdutil.findcmd(cmd,
661 getattr(mod, 'cmdtable', {}), strict)
657 getattr(mod, 'cmdtable', {}), strict)
662 except (error.AmbiguousCommand, error.UnknownCommand):
658 except (error.AmbiguousCommand, error.UnknownCommand):
663 return
659 return
664 except Exception:
660 except Exception:
665 ui.warn(_('warning: error finding commands in %s\n') % path)
661 ui.warn(_('warning: error finding commands in %s\n') % path)
666 ui.traceback()
662 ui.traceback()
667 return
663 return
668 for c in aliases:
664 for c in aliases:
669 if c.startswith(cmd):
665 if c.startswith(cmd):
670 cmd = c
666 cmd = c
671 break
667 break
672 else:
668 else:
673 cmd = aliases[0]
669 cmd = aliases[0]
674 return (cmd, name, mod)
670 return (cmd, name, mod)
675
671
676 ext = None
672 ext = None
677 # first, search for an extension with the same name as the command
673 # first, search for an extension with the same name as the command
678 path = paths.pop(cmd, None)
674 path = paths.pop(cmd, None)
679 if path:
675 if path:
680 ext = findcmd(cmd, cmd, path)
676 ext = findcmd(cmd, cmd, path)
681 if not ext:
677 if not ext:
682 # otherwise, interrogate each extension until there's a match
678 # otherwise, interrogate each extension until there's a match
683 for name, path in paths.iteritems():
679 for name, path in paths.iteritems():
684 ext = findcmd(cmd, name, path)
680 ext = findcmd(cmd, name, path)
685 if ext:
681 if ext:
686 break
682 break
687 if ext and 'DEPRECATED' not in ext.__doc__:
683 if ext and 'DEPRECATED' not in ext.__doc__:
688 return ext
684 return ext
689
685
690 raise error.UnknownCommand(cmd)
686 raise error.UnknownCommand(cmd)
691
687
692 def enabled(shortname=True):
688 def enabled(shortname=True):
693 '''return a dict of {name: desc} of extensions'''
689 '''return a dict of {name: desc} of extensions'''
694 exts = {}
690 exts = {}
695 for ename, ext in extensions():
691 for ename, ext in extensions():
696 doc = (gettext(ext.__doc__) or _('(no help text available)'))
692 doc = (gettext(ext.__doc__) or _('(no help text available)'))
697 if shortname:
693 if shortname:
698 ename = ename.split('.')[-1]
694 ename = ename.split('.')[-1]
699 exts[ename] = doc.splitlines()[0].strip()
695 exts[ename] = doc.splitlines()[0].strip()
700
696
701 return exts
697 return exts
702
698
703 def notloaded():
699 def notloaded():
704 '''return short names of extensions that failed to load'''
700 '''return short names of extensions that failed to load'''
705 return [name for name, mod in _extensions.iteritems() if mod is None]
701 return [name for name, mod in _extensions.iteritems() if mod is None]
706
702
707 def moduleversion(module):
703 def moduleversion(module):
708 '''return version information from given module as a string'''
704 '''return version information from given module as a string'''
709 if (util.safehasattr(module, 'getversion')
705 if (util.safehasattr(module, 'getversion')
710 and callable(module.getversion)):
706 and callable(module.getversion)):
711 version = module.getversion()
707 version = module.getversion()
712 elif util.safehasattr(module, '__version__'):
708 elif util.safehasattr(module, '__version__'):
713 version = module.__version__
709 version = module.__version__
714 else:
710 else:
715 version = ''
711 version = ''
716 if isinstance(version, (list, tuple)):
712 if isinstance(version, (list, tuple)):
717 version = '.'.join(str(o) for o in version)
713 version = '.'.join(str(o) for o in version)
718 return version
714 return version
719
715
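The two module-level spellings this helper understands, shown in a
hypothetical extension:

    # either a static attribute; list/tuple forms are joined with '.'
    __version__ = (1, 2, 0)  # reported as '1.2.0'

    # or a callable, for dynamically computed versions
    def getversion():
        return '1.2.0+local'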
720 def ismoduleinternal(module):
716 def ismoduleinternal(module):
721 exttestedwith = getattr(module, 'testedwith', None)
717 exttestedwith = getattr(module, 'testedwith', None)
722 return exttestedwith == "ships-with-hg-core"
718 return exttestedwith == "ships-with-hg-core"
@@ -1,2275 +1,2274 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
13 import os
12 import os
14 import random
13 import random
15 import time
14 import time
16 import weakref
15 import weakref
17
16
18 from .i18n import _
17 from .i18n import _
19 from .node import (
18 from .node import (
20 hex,
19 hex,
21 nullid,
20 nullid,
22 short,
21 short,
23 )
22 )
24 from . import (
23 from . import (
25 bookmarks,
24 bookmarks,
26 branchmap,
25 branchmap,
27 bundle2,
26 bundle2,
28 changegroup,
27 changegroup,
29 changelog,
28 changelog,
30 color,
29 color,
31 context,
30 context,
32 dirstate,
31 dirstate,
33 dirstateguard,
32 dirstateguard,
34 discovery,
33 discovery,
35 encoding,
34 encoding,
36 error,
35 error,
37 exchange,
36 exchange,
38 extensions,
37 extensions,
39 filelog,
38 filelog,
40 hook,
39 hook,
41 lock as lockmod,
40 lock as lockmod,
42 manifest,
41 manifest,
43 match as matchmod,
42 match as matchmod,
44 merge as mergemod,
43 merge as mergemod,
45 mergeutil,
44 mergeutil,
46 namespaces,
45 namespaces,
47 obsolete,
46 obsolete,
48 pathutil,
47 pathutil,
49 peer,
48 peer,
50 phases,
49 phases,
51 pushkey,
50 pushkey,
52 pycompat,
51 pycompat,
53 repository,
52 repository,
54 repoview,
53 repoview,
55 revset,
54 revset,
56 revsetlang,
55 revsetlang,
57 scmutil,
56 scmutil,
58 sparse,
57 sparse,
59 store,
58 store,
60 subrepoutil,
59 subrepoutil,
61 tags as tagsmod,
60 tags as tagsmod,
62 transaction,
61 transaction,
63 txnutil,
62 txnutil,
64 util,
63 util,
65 vfs as vfsmod,
64 vfs as vfsmod,
66 )
65 )
67
66
68 release = lockmod.release
67 release = lockmod.release
69 urlerr = util.urlerr
68 urlerr = util.urlerr
70 urlreq = util.urlreq
69 urlreq = util.urlreq
71
70
72 # set of (path, vfs-location) tuples. vfs-location is:
71 # set of (path, vfs-location) tuples. vfs-location is:
73 # - 'plain' for vfs relative paths
72 # - 'plain' for vfs relative paths
74 # - '' for svfs relative paths
73 # - '' for svfs relative paths
75 _cachedfiles = set()
74 _cachedfiles = set()
76
75
77 class _basefilecache(scmutil.filecache):
76 class _basefilecache(scmutil.filecache):
78 """All filecache usage on repo are done for logic that should be unfiltered
77 """All filecache usage on repo are done for logic that should be unfiltered
79 """
78 """
80 def __get__(self, repo, type=None):
79 def __get__(self, repo, type=None):
81 if repo is None:
80 if repo is None:
82 return self
81 return self
83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
82 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
84 def __set__(self, repo, value):
83 def __set__(self, repo, value):
85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
84 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
86 def __delete__(self, repo):
85 def __delete__(self, repo):
87 return super(_basefilecache, self).__delete__(repo.unfiltered())
86 return super(_basefilecache, self).__delete__(repo.unfiltered())
88
87
89 class repofilecache(_basefilecache):
88 class repofilecache(_basefilecache):
90 """filecache for files in .hg but outside of .hg/store"""
89 """filecache for files in .hg but outside of .hg/store"""
91 def __init__(self, *paths):
90 def __init__(self, *paths):
92 super(repofilecache, self).__init__(*paths)
91 super(repofilecache, self).__init__(*paths)
93 for path in paths:
92 for path in paths:
94 _cachedfiles.add((path, 'plain'))
93 _cachedfiles.add((path, 'plain'))
95
94
96 def join(self, obj, fname):
95 def join(self, obj, fname):
97 return obj.vfs.join(fname)
96 return obj.vfs.join(fname)
98
97
99 class storecache(_basefilecache):
98 class storecache(_basefilecache):
100 """filecache for files in the store"""
99 """filecache for files in the store"""
101 def __init__(self, *paths):
100 def __init__(self, *paths):
102 super(storecache, self).__init__(*paths)
101 super(storecache, self).__init__(*paths)
103 for path in paths:
102 for path in paths:
104 _cachedfiles.add((path, ''))
103 _cachedfiles.add((path, ''))
105
104
106 def join(self, obj, fname):
105 def join(self, obj, fname):
107 return obj.sjoin(fname)
106 return obj.sjoin(fname)
108
107
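A representative sketch of how these descriptors are used on the
repository class further down; the decorated properties and bodies here
are illustrative, not quoted from this file:

    class examplerepo(localrepository):
        @repofilecache('bookmarks')
        def _bookmarks(self):
            # re-read only when .hg/bookmarks changes on disk
            return bookmarks.bmstore(self)

        @storecache('00changelog.i')
        def changelog(self):
            # re-read only when .hg/store/00changelog.i changes
            return changelog.changelog(self.svfs)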
109 def isfilecached(repo, name):
108 def isfilecached(repo, name):
110 """check if a repo has already cached "name" filecache-ed property
109 """check if a repo has already cached "name" filecache-ed property
111
110
112 This returns (cachedobj-or-None, iscached) tuple.
111 This returns (cachedobj-or-None, iscached) tuple.
113 """
112 """
114 cacheentry = repo.unfiltered()._filecache.get(name, None)
113 cacheentry = repo.unfiltered()._filecache.get(name, None)
115 if not cacheentry:
114 if not cacheentry:
116 return None, False
115 return None, False
117 return cacheentry.obj, True
116 return cacheentry.obj, True
118
117
119 class unfilteredpropertycache(util.propertycache):
118 class unfilteredpropertycache(util.propertycache):
120 """propertycache that apply to unfiltered repo only"""
119 """propertycache that apply to unfiltered repo only"""
121
120
122 def __get__(self, repo, type=None):
121 def __get__(self, repo, type=None):
123 unfi = repo.unfiltered()
122 unfi = repo.unfiltered()
124 if unfi is repo:
123 if unfi is repo:
125 return super(unfilteredpropertycache, self).__get__(unfi)
124 return super(unfilteredpropertycache, self).__get__(unfi)
126 return getattr(unfi, self.name)
125 return getattr(unfi, self.name)
127
126
128 class filteredpropertycache(util.propertycache):
127 class filteredpropertycache(util.propertycache):
129 """propertycache that must take filtering in account"""
128 """propertycache that must take filtering in account"""
130
129
131 def cachevalue(self, obj, value):
130 def cachevalue(self, obj, value):
132 object.__setattr__(obj, self.name, value)
131 object.__setattr__(obj, self.name, value)
133
132
134
133
135 def hasunfilteredcache(repo, name):
134 def hasunfilteredcache(repo, name):
136 """check if a repo has an unfilteredpropertycache value for <name>"""
135 """check if a repo has an unfilteredpropertycache value for <name>"""
137 return name in vars(repo.unfiltered())
136 return name in vars(repo.unfiltered())
138
137
139 def unfilteredmethod(orig):
138 def unfilteredmethod(orig):
140 """decorate method that always need to be run on unfiltered version"""
139 """decorate method that always need to be run on unfiltered version"""
141 def wrapper(repo, *args, **kwargs):
140 def wrapper(repo, *args, **kwargs):
142 return orig(repo.unfiltered(), *args, **kwargs)
141 return orig(repo.unfiltered(), *args, **kwargs)
143 return wrapper
142 return wrapper
144
143
145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
144 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
146 'unbundle'}
145 'unbundle'}
147 legacycaps = moderncaps.union({'changegroupsubset'})
146 legacycaps = moderncaps.union({'changegroupsubset'})
148
147
149 class localpeer(repository.peer):
148 class localpeer(repository.peer):
150 '''peer for a local repo; reflects only the most recent API'''
149 '''peer for a local repo; reflects only the most recent API'''
151
150
152 def __init__(self, repo, caps=None):
151 def __init__(self, repo, caps=None):
153 super(localpeer, self).__init__()
152 super(localpeer, self).__init__()
154
153
155 if caps is None:
154 if caps is None:
156 caps = moderncaps.copy()
155 caps = moderncaps.copy()
157 self._repo = repo.filtered('served')
156 self._repo = repo.filtered('served')
158 self._ui = repo.ui
157 self._ui = repo.ui
159 self._caps = repo._restrictcapabilities(caps)
158 self._caps = repo._restrictcapabilities(caps)
160
159
161 # Begin of _basepeer interface.
160 # Begin of _basepeer interface.
162
161
163 @util.propertycache
162 @util.propertycache
164 def ui(self):
163 def ui(self):
165 return self._ui
164 return self._ui
166
165
167 def url(self):
166 def url(self):
168 return self._repo.url()
167 return self._repo.url()
169
168
170 def local(self):
169 def local(self):
171 return self._repo
170 return self._repo
172
171
173 def peer(self):
172 def peer(self):
174 return self
173 return self
175
174
176 def canpush(self):
175 def canpush(self):
177 return True
176 return True
178
177
179 def close(self):
178 def close(self):
180 self._repo.close()
179 self._repo.close()
181
180
182 # End of _basepeer interface.
181 # End of _basepeer interface.
183
182
184 # Begin of _basewirecommands interface.
183 # Begin of _basewirecommands interface.
185
184
186 def branchmap(self):
185 def branchmap(self):
187 return self._repo.branchmap()
186 return self._repo.branchmap()
188
187
189 def capabilities(self):
188 def capabilities(self):
190 return self._caps
189 return self._caps
191
190
192 def debugwireargs(self, one, two, three=None, four=None, five=None):
191 def debugwireargs(self, one, two, three=None, four=None, five=None):
193 """Used to test argument passing over the wire"""
192 """Used to test argument passing over the wire"""
194 return "%s %s %s %s %s" % (one, two, three, four, five)
193 return "%s %s %s %s %s" % (one, two, three, four, five)
195
194
196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
195 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
197 **kwargs):
196 **kwargs):
198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
197 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
199 common=common, bundlecaps=bundlecaps,
198 common=common, bundlecaps=bundlecaps,
200 **kwargs)[1]
199 **kwargs)[1]
201 cb = util.chunkbuffer(chunks)
200 cb = util.chunkbuffer(chunks)
202
201
203 if exchange.bundle2requested(bundlecaps):
202 if exchange.bundle2requested(bundlecaps):
204 # When requesting a bundle2, getbundle returns a stream to make the
203 # When requesting a bundle2, getbundle returns a stream to make the
205 # wire level function happier. We need to build a proper object
204 # wire level function happier. We need to build a proper object
206 # from it in local peer.
205 # from it in local peer.
207 return bundle2.getunbundler(self.ui, cb)
206 return bundle2.getunbundler(self.ui, cb)
208 else:
207 else:
209 return changegroup.getunbundler('01', cb, None)
208 return changegroup.getunbundler('01', cb, None)
210
209
211 def heads(self):
210 def heads(self):
212 return self._repo.heads()
211 return self._repo.heads()
213
212
214 def known(self, nodes):
213 def known(self, nodes):
215 return self._repo.known(nodes)
214 return self._repo.known(nodes)
216
215
217 def listkeys(self, namespace):
216 def listkeys(self, namespace):
218 return self._repo.listkeys(namespace)
217 return self._repo.listkeys(namespace)
219
218
220 def lookup(self, key):
219 def lookup(self, key):
221 return self._repo.lookup(key)
220 return self._repo.lookup(key)
222
221
223 def pushkey(self, namespace, key, old, new):
222 def pushkey(self, namespace, key, old, new):
224 return self._repo.pushkey(namespace, key, old, new)
223 return self._repo.pushkey(namespace, key, old, new)
225
224
226 def stream_out(self):
225 def stream_out(self):
227 raise error.Abort(_('cannot perform stream clone against local '
226 raise error.Abort(_('cannot perform stream clone against local '
228 'peer'))
227 'peer'))
229
228
230 def unbundle(self, cg, heads, url):
229 def unbundle(self, cg, heads, url):
231 """apply a bundle on a repo
230 """apply a bundle on a repo
232
231
233 This function handles the repo locking itself."""
232 This function handles the repo locking itself."""
234 try:
233 try:
235 try:
234 try:
236 cg = exchange.readbundle(self.ui, cg, None)
235 cg = exchange.readbundle(self.ui, cg, None)
237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
236 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
238 if util.safehasattr(ret, 'getchunks'):
237 if util.safehasattr(ret, 'getchunks'):
239 # This is a bundle20 object, turn it into an unbundler.
238 # This is a bundle20 object, turn it into an unbundler.
240 # This little dance should be dropped eventually when the
239 # This little dance should be dropped eventually when the
241 # API is finally improved.
240 # API is finally improved.
242 stream = util.chunkbuffer(ret.getchunks())
241 stream = util.chunkbuffer(ret.getchunks())
243 ret = bundle2.getunbundler(self.ui, stream)
242 ret = bundle2.getunbundler(self.ui, stream)
244 return ret
243 return ret
245 except Exception as exc:
244 except Exception as exc:
246 # If the exception contains output salvaged from a bundle2
245 # If the exception contains output salvaged from a bundle2
247 # reply, we need to make sure it is printed before continuing
246 # reply, we need to make sure it is printed before continuing
248 # to fail. So we build a bundle2 with such output and consume
247 # to fail. So we build a bundle2 with such output and consume
249 # it directly.
248 # it directly.
250 #
249 #
251 # This is not very elegant but allows a "simple" solution for
250 # This is not very elegant but allows a "simple" solution for
252 # issue4594
251 # issue4594
253 output = getattr(exc, '_bundle2salvagedoutput', ())
252 output = getattr(exc, '_bundle2salvagedoutput', ())
254 if output:
253 if output:
255 bundler = bundle2.bundle20(self._repo.ui)
254 bundler = bundle2.bundle20(self._repo.ui)
256 for out in output:
255 for out in output:
257 bundler.addpart(out)
256 bundler.addpart(out)
258 stream = util.chunkbuffer(bundler.getchunks())
257 stream = util.chunkbuffer(bundler.getchunks())
259 b = bundle2.getunbundler(self.ui, stream)
258 b = bundle2.getunbundler(self.ui, stream)
260 bundle2.processbundle(self._repo, b)
259 bundle2.processbundle(self._repo, b)
261 raise
260 raise
262 except error.PushRaced as exc:
261 except error.PushRaced as exc:
263 raise error.ResponseError(_('push failed:'), str(exc))
262 raise error.ResponseError(_('push failed:'), str(exc))
264
263
265 # End of _basewirecommands interface.
264 # End of _basewirecommands interface.
266
265
267 # Begin of peer interface.
266 # Begin of peer interface.
268
267
269 def iterbatch(self):
268 def iterbatch(self):
270 return peer.localiterbatcher(self)
269 return peer.localiterbatcher(self)
271
270
272 # End of peer interface.
271 # End of peer interface.
273
272
274 class locallegacypeer(repository.legacypeer, localpeer):
273 class locallegacypeer(repository.legacypeer, localpeer):
275 '''peer extension which implements legacy methods too; used for tests with
274 '''peer extension which implements legacy methods too; used for tests with
276 restricted capabilities'''
275 restricted capabilities'''
277
276
278 def __init__(self, repo):
277 def __init__(self, repo):
279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
278 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
280
279
281 # Begin of baselegacywirecommands interface.
280 # Begin of baselegacywirecommands interface.
282
281
283 def between(self, pairs):
282 def between(self, pairs):
284 return self._repo.between(pairs)
283 return self._repo.between(pairs)
285
284
286 def branches(self, nodes):
285 def branches(self, nodes):
287 return self._repo.branches(nodes)
286 return self._repo.branches(nodes)
288
287
289 def changegroup(self, basenodes, source):
288 def changegroup(self, basenodes, source):
290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
289 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
291 missingheads=self._repo.heads())
290 missingheads=self._repo.heads())
292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
291 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293
292
294 def changegroupsubset(self, bases, heads, source):
293 def changegroupsubset(self, bases, heads, source):
295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
294 outgoing = discovery.outgoing(self._repo, missingroots=bases,
296 missingheads=heads)
295 missingheads=heads)
297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
296 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
298
297
299 # End of baselegacywirecommands interface.
298 # End of baselegacywirecommands interface.
300
299
301 # Increment the sub-version when the revlog v2 format changes to lock out old
300 # Increment the sub-version when the revlog v2 format changes to lock out old
302 # clients.
301 # clients.
303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
302 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304
303
305 class localrepository(object):
304 class localrepository(object):
306
305
307 supportedformats = {
306 supportedformats = {
308 'revlogv1',
307 'revlogv1',
309 'generaldelta',
308 'generaldelta',
310 'treemanifest',
309 'treemanifest',
311 'manifestv2',
310 'manifestv2',
312 REVLOGV2_REQUIREMENT,
311 REVLOGV2_REQUIREMENT,
313 }
312 }
314 _basesupported = supportedformats | {
313 _basesupported = supportedformats | {
315 'store',
314 'store',
316 'fncache',
315 'fncache',
317 'shared',
316 'shared',
318 'relshared',
317 'relshared',
319 'dotencode',
318 'dotencode',
320 'exp-sparse',
319 'exp-sparse',
321 }
320 }
322 openerreqs = {
321 openerreqs = {
323 'revlogv1',
322 'revlogv1',
324 'generaldelta',
323 'generaldelta',
325 'treemanifest',
324 'treemanifest',
326 'manifestv2',
325 'manifestv2',
327 }
326 }
328
327
329 # a list of (ui, featureset) functions.
328 # a list of (ui, featureset) functions.
330 # only functions defined in module of enabled extensions are invoked
329 # only functions defined in module of enabled extensions are invoked
331 featuresetupfuncs = set()
330 featuresetupfuncs = set()
332
331
333 # list of prefixes for files which can be written without 'wlock'
332 # list of prefixes for files which can be written without 'wlock'
334 # Extensions should extend this list when needed
333 # Extensions should extend this list when needed
335 _wlockfreeprefix = {
334 _wlockfreeprefix = {
336 # We might consider requiring 'wlock' for the next
335 # We might consider requiring 'wlock' for the next
337 # two, but pretty much all the existing code assumes
336 # two, but pretty much all the existing code assumes
338 # wlock is not needed so we keep them excluded for
337 # wlock is not needed so we keep them excluded for
339 # now.
338 # now.
340 'hgrc',
339 'hgrc',
341 'requires',
340 'requires',
342 # XXX cache is a complicated business; someone
341 # XXX cache is a complicated business; someone
343 # should investigate this in depth at some point
342 # should investigate this in depth at some point
344 'cache/',
343 'cache/',
345 # XXX shouldn't be dirstate covered by the wlock?
344 # XXX shouldn't be dirstate covered by the wlock?
346 'dirstate',
345 'dirstate',
347 # XXX bisect was still a bit too messy at the time
346 # XXX bisect was still a bit too messy at the time
348 # this changeset was introduced. Someone should fix
347 # this changeset was introduced. Someone should fix
349 # the remaining bit and drop this line
348 # the remaining bit and drop this line
350 'bisect.state',
349 'bisect.state',
351 }
350 }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
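
    # The ward above is only installed when lock validation is requested.
    # A sketch of the configuration that enables it (for instance in a
    # developer's hgrc):
    #
    #     [devel]
    #     all-warnings = yes
    #     check-locks = yes
    #
    # An unlocked write under .hg/ then produces develwarn output along
    # the lines of: write with no wlock: "bookmarks"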

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
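
    # A sketch of the walk above for a repository whose working copy has a
    # subrepository registered under 'sub' (paths are hypothetical):
    #
    #     _checknested('<root>/sub')     -> 'sub' == normsubpath -> True
    #     _checknested('<root>/sub/x/y') -> delegated: sub.checknested('x/y')
    #     _checknested('<root>/other')   -> no substate match    -> False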

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
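
    # Sketch of the lookup forms accepted above:
    #
    #     repo[None]    -> workingctx for the working directory
    #     repo[0]       -> changectx for revision 0
    #     repo['tip']   -> changectx for the tip changeset
    #     repo[0:3]     -> list of changectx, skipping filtered revisions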

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
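
    # Usage sketch, relying on the %-formatting described above:
    #
    #     for rev in repo.revs('ancestors(%d) and not public()', 42):
    #         ...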

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
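
    # Usage sketch (the alias name 'mine' is hypothetical): expand user
    # aliases but override one of them locally:
    #
    #     m = repo.anyrevs(['mine() and draft()'], user=True,
    #                      localalias={'mine': 'author(alice)'})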

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
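
    # Usage sketch, firing a hypothetical 'myextension-postfoo' hook and
    # aborting the caller if a configured hook fails:
    #
    #     repo.hook('myextension-postfoo', throw=True, node=hex(node))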

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
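
    # Usage sketch: the returned cache behaves like a dict mapping branch
    # name to its list of heads, the last entry being the branch tip:
    #
    #     heads = repo.branchmap()['default']
    #     tip = heads[-1]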

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
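
    # The pycompat.getargspec() check above looks at the varkw slot (index 2
    # of the argspec), so a filter function declared without **kwargs is
    # wrapped before being called with extra keyword arguments. Sketch:
    #
    #     def oldstyle(s, cmd):            # no **kwargs -> wrapped above
    #         return s
    #     def newstyle(s, cmd, **kwargs):  # receives ui/repo/filename too
    #         return s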

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
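
    # Flag semantics sketch ('l' and 'x' as handled above; file names are
    # illustrative):
    #
    #     repo.wwrite('script.sh', '#!/bin/sh\n', 'x')  # executable file
    #     repo.wwrite('link', 'target', 'l')            # symlink to 'target'
    #     repo.wwrite('plain.txt', 'text', '')          # regular file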

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
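        #
        # For instance, a tag 'v1' moving between two revisions would be
        # recorded as a "-M" line for the old node followed by a "+M" line
        # for the new one (hex nodes are illustrative):
        #
        #   -M 3a2b...e1f0 v1
        #   +M 9c8d...44aa v1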
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we only do it once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a
            # hacky path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # This should be explicitly invoked here, because in-memory
                # changes aren't written out when closing the transaction if
                # tr.addfilegenerator (via dirstate.write or so) isn't
                # invoked while the transaction is running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
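        # tr.changes collects what the transaction touches so that hooks and
        # summary callbacks can report on it. 'revs' starts out as an empty
        # range; code adding revisions is expected to widen it, and the other
        # entries fill in as bookmarks, phases and obsmarkers change.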
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run.
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be
        # warm when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if the transaction is aborted."""
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
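        # 'undo.desc' is the renamed copy of the 'journal.desc' file written
        # by _writejournal above: the old changelog length on the first line,
        # the transaction description on the second, and, for some
        # operations, an optional third line with extra detail that is only
        # shown in verbose mode.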
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from
        # being invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to
        the method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

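    # Lock ordering matters: when both locks are needed, callers should take
    # 'wlock' before 'lock' (see the docstrings below). A typical caller
    # (a sketch, not code from this file) would do something like:
    #
    #     with repo.wlock(), repo.lock():
    #         ...mutate the store and the working copy...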
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            #    0 --- 1 --- 3   rev1 changes file foo
            #      \       /     rev2 renames foo to bar and changes it
            #       \- 2 -/      rev3 should have bar with all changes and
            #                        should record that bar descends from
            #                        bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            #    0 --- 1 --- 3   rev4 reverts the content change from rev2
            #      \       /     merging rev3 and rev4 should use bar@rev2
            #       \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract
                # anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure to add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

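    # Example (hypothetical extension code, not from this changeset): an
    # extension could register a one-shot callback that reports how many
    # files were modified once the status fixups have run.
    #
    #     def _reportmodified(wctx, status):
    #         wctx.repo().ui.note('%d files modified\n' % len(status.modified))
    #
    #     repo.addpostdsstatus(_reportmodified)
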
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

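    # Example (illustrative, not from this changeset): listing the heads of
    # the "default" branch, newest first, including closed heads:
    #
    #     heads = repo.branchheads('default', closed=True)
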
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

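    # Example (illustrative, not from this changeset): for a linear history
    # with revisions 0..10, between([(node(10), node(0))]) walks first-parent
    # ancestors of node(10) and samples them at exponentially growing
    # distances 1, 2, 4, 8, ...:
    #
    #     [[node(9), node(8), node(6), node(2)]]
    #
    # which keeps the probe lists used by the old discovery protocol
    # logarithmic in the length of the branch.
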
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

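    # Example (hypothetical extension code, not from this changeset): using
    # the prepush hooks to refuse oversized pushes; util.hooks.add() takes a
    # source name and a callable.
    #
    #     from mercurial import error
    #
    #     def _limitpush(pushop):
    #         if len(pushop.outgoing.missing) > 100:
    #             raise error.Abort('refusing to push more than 100 changesets')
    #
    #     def reposetup(ui, repo):
    #         if repo.local():
    #             repo.prepushoutgoinghooks.add('limitpush', _limitpush)
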
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

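    # Example (illustrative, not from this changeset): the hook arguments
    # assembled above surface to shell hooks as HG_* environment variables,
    # so an hgrc entry can veto pushkey operations, e.g. to forbid pushing
    # bookmarks:
    #
    #     [hooks]
    #     prepushkey.nobookmarks = sh -c '[ "$HG_NAMESPACE" != bookmarks ]'
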
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

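# Example (illustrative, not from this changeset): undoname() maps a
# transaction journal file to its undo counterpart:
#
#     undoname('.hg/store/journal')            -> '.hg/store/undo'
#     undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
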
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
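
# Example (hypothetical extension code, not from this changeset): as the
# docstring above says, extensions can wrap newreporequirements() to add
# their own requirement to newly created repositories. The requirement name
# below is made up.
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         requirements = orig(repo)
#         requirements.add('exp-myextension')
#         return requirements
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)
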
@@ -1,348 +1,351 @@ mercurial/pycompat.py
# pycompat.py - portability shim for python 3
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial portability shim for python 3.

This contains aliases to hide python version-specific details from the core.
"""

from __future__ import absolute_import

import getopt
+import inspect
import os
import shlex
import sys

ispy3 = (sys.version_info[0] >= 3)
ispypy = (r'__pypy__' in sys.builtin_module_names)

if not ispy3:
    import cookielib
    import cPickle as pickle
    import httplib
    import Queue as _queue
    import SocketServer as socketserver
    import xmlrpclib
else:
    import http.cookiejar as cookielib
    import http.client as httplib
    import pickle
    import queue as _queue
    import socketserver
    import xmlrpc.client as xmlrpclib

empty = _queue.Empty
queue = _queue.Queue

def identity(a):
    return a

if ispy3:
    import builtins
    import functools
    import io
    import struct

    fsencode = os.fsencode
    fsdecode = os.fsdecode
    oslinesep = os.linesep.encode('ascii')
    osname = os.name.encode('ascii')
    ospathsep = os.pathsep.encode('ascii')
    ossep = os.sep.encode('ascii')
    osaltsep = os.altsep
    if osaltsep:
        osaltsep = osaltsep.encode('ascii')
    # os.getcwd() on Python 3 returns string, but it has os.getcwdb() which
    # returns bytes.
    getcwd = os.getcwdb
    sysplatform = sys.platform.encode('ascii')
    sysexecutable = sys.executable
    if sysexecutable:
        sysexecutable = os.fsencode(sysexecutable)
    stringio = io.BytesIO
    maplist = lambda *args: list(map(*args))
    ziplist = lambda *args: list(zip(*args))
    rawinput = input
+    getargspec = inspect.getfullargspec

    # TODO: .buffer might not exist if std streams were replaced; we'll need
    # a silly wrapper to make a bytes stream backed by a unicode one.
    stdin = sys.stdin.buffer
    stdout = sys.stdout.buffer
    stderr = sys.stderr.buffer

    # Since Python 3 converts argv to wchar_t type by Py_DecodeLocale() on
    # Unix, we can use os.fsencode() to get back bytes argv.
    #
    # https://hg.python.org/cpython/file/v3.5.1/Programs/python.c#l55
    #
    # TODO: On Windows, the native argv is wchar_t, so we'll need a different
    # workaround to simulate the Python 2 (i.e. ANSI Win32 API) behavior.
    if getattr(sys, 'argv', None) is not None:
        sysargv = list(map(os.fsencode, sys.argv))

    bytechr = struct.Struct('>B').pack

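    # Example (illustrative, not from this changeset): bytechr() replaces
    # Python 2's chr() for code that must produce bytes on both versions:
    #
    #     bytechr(16) == b'\x10'
    #     bytechr(255) == b'\xff'
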
    class bytestr(bytes):
        """A bytes which mostly acts as a Python 2 str

        >>> bytestr(), bytestr(bytearray(b'foo')), bytestr(u'ascii'), bytestr(1)
        ('', 'foo', 'ascii', '1')
        >>> s = bytestr(b'foo')
        >>> assert s is bytestr(s)

        __bytes__() should be called if provided:

        >>> class bytesable(object):
        ...     def __bytes__(self):
        ...         return b'bytes'
        >>> bytestr(bytesable())
        'bytes'

        There's no implicit conversion from non-ascii str as its encoding is
        unknown:

        >>> bytestr(chr(0x80)) # doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        UnicodeEncodeError: ...

        Comparison between bytestr and bytes should work:

        >>> assert bytestr(b'foo') == b'foo'
        >>> assert b'foo' == bytestr(b'foo')
        >>> assert b'f' in bytestr(b'foo')
        >>> assert bytestr(b'f') in b'foo'

        Sliced elements should be bytes, not integer:

        >>> s[1], s[:2]
        (b'o', b'fo')
        >>> list(s), list(reversed(s))
        ([b'f', b'o', b'o'], [b'o', b'o', b'f'])

        As bytestr type isn't propagated across operations, you need to cast
        bytes to bytestr explicitly:

        >>> s = bytestr(b'foo').upper()
        >>> t = bytestr(s)
        >>> s[0], t[0]
        (70, b'F')

        Be careful not to pass a bytestr object to a function which expects
        bytearray-like behavior.

        >>> t = bytes(t) # cast to bytes
        >>> assert type(t) is bytes
        """

        def __new__(cls, s=b''):
            if isinstance(s, bytestr):
                return s
            if (not isinstance(s, (bytes, bytearray))
                and not hasattr(s, u'__bytes__')): # hasattr-py3-only
                s = str(s).encode(u'ascii')
            return bytes.__new__(cls, s)

        def __getitem__(self, key):
            s = bytes.__getitem__(self, key)
            if not isinstance(s, bytes):
                s = bytechr(s)
            return s

        def __iter__(self):
            return iterbytestr(bytes.__iter__(self))

        def __repr__(self):
            return bytes.__repr__(self)[1:] # drop b''

    def iterbytestr(s):
        """Iterate bytes as if it were a str object of Python 2"""
        return map(bytechr, s)

    def maybebytestr(s):
        """Promote bytes to bytestr"""
        if isinstance(s, bytes):
            return bytestr(s)
        return s

    def sysbytes(s):
        """Convert an internal str (e.g. keyword, __doc__) back to bytes

        This never raises UnicodeEncodeError, but only ASCII characters
        can be round-tripped by sysstr(sysbytes(s)).
        """
        return s.encode(u'utf-8')

    def sysstr(s):
        """Return a keyword str to be passed to Python functions such as
        getattr() and str.encode()

        This never raises UnicodeDecodeError. Non-ascii characters are
        considered invalid and mapped to arbitrary but unique code points
        such that 'sysstr(a) != sysstr(b)' for all 'a != b'.
        """
        if isinstance(s, builtins.str):
            return s
        return s.decode(u'latin-1')

    def strurl(url):
        """Converts a bytes url back to str"""
        return url.decode(u'ascii')

    def bytesurl(url):
        """Converts a str url to bytes by encoding in ascii"""
        return url.encode(u'ascii')

    def raisewithtb(exc, tb):
        """Raise exception with the given traceback"""
        raise exc.with_traceback(tb)

    def getdoc(obj):
        """Get docstring as bytes; may be None so gettext() won't confuse it
        with _('')"""
        doc = getattr(obj, u'__doc__', None)
        if doc is None:
            return doc
        return sysbytes(doc)

    def _wrapattrfunc(f):
        @functools.wraps(f)
        def w(object, name, *args):
            return f(object, sysstr(name), *args)
        return w

    # these wrappers are automagically imported by hgloader
    delattr = _wrapattrfunc(builtins.delattr)
    getattr = _wrapattrfunc(builtins.getattr)
    hasattr = _wrapattrfunc(builtins.hasattr)
    setattr = _wrapattrfunc(builtins.setattr)
    xrange = builtins.range
    unicode = str

    def open(name, mode='r', buffering=-1):
        return builtins.open(name, sysstr(mode), buffering)

    def _getoptbwrapper(orig, args, shortlist, namelist):
        """
        Takes bytes arguments, converts them to unicode, passes them to
        getopt.getopt(), converts the returned values back to bytes and
        returns them, as getopt.getopt() doesn't accept bytes on Python 3.
        """
        args = [a.decode('latin-1') for a in args]
        shortlist = shortlist.decode('latin-1')
        namelist = [a.decode('latin-1') for a in namelist]
        opts, args = orig(args, shortlist, namelist)
        opts = [(a[0].encode('latin-1'), a[1].encode('latin-1'))
                for a in opts]
        args = [a.encode('latin-1') for a in args]
        return opts, args

    def strkwargs(dic):
        """
        Converts the keys of a python dictionary to str i.e. unicode so that
        they can be passed as keyword arguments, as dictionaries with bytes
        keys can't be passed as keyword arguments to functions on Python 3.
        """
        dic = dict((k.decode('latin-1'), v) for k, v in dic.iteritems())
        return dic

    def byteskwargs(dic):
        """
        Converts keys of python dictionaries to bytes as they were converted
        to str to pass that dictionary as keyword arguments on Python 3.
        """
        dic = dict((k.encode('latin-1'), v) for k, v in dic.iteritems())
        return dic

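    # Example (illustrative, not from this changeset): the usual pattern in
    # hg command implementations is to convert at the **kwargs boundary in
    # both directions:
    #
    #     def mycommand(ui, repo, **opts):
    #         opts = pycompat.byteskwargs(opts)  # back to bytes keys
    #         ...
    #         commands.status(ui, repo, **pycompat.strkwargs(opts))
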
    # TODO: handle shlex.shlex().
    def shlexsplit(s):
        """
        Takes a bytes argument, converts it to str i.e. unicode, passes that
        into shlex.split(), converts the returned value to bytes and returns
        that, as shlex.split() doesn't accept bytes on Python 3.
        """
        ret = shlex.split(s.decode('latin-1'))
        return [a.encode('latin-1') for a in ret]

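    # Example (illustrative, not from this changeset):
    #
    #     shlexsplit(b'ssh -o "User hg"') == [b'ssh', b'-o', b'User hg']
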
    def emailparser(*args, **kwargs):
        import email.parser
        return email.parser.BytesParser(*args, **kwargs)

else:
    import cStringIO

    bytechr = chr
    bytestr = str
    iterbytestr = iter
    maybebytestr = identity
    sysbytes = identity
    sysstr = identity
    strurl = identity
    bytesurl = identity

    # this can't be parsed on Python 3
    exec('def raisewithtb(exc, tb):\n'
         '    raise exc, None, tb\n')

    def fsencode(filename):
        """
        Partial backport from os.py in Python 3, which only accepts bytes.
        In Python 2, our paths should only ever be bytes, a unicode path
        indicates a bug.
        """
        if isinstance(filename, str):
            return filename
        else:
            raise TypeError(
                "expect str, not %s" % type(filename).__name__)

    # In Python 2, fsdecode() has a very good chance of receiving bytes, so
    # it's better not to touch the Python 2 part as it's already working fine.
    fsdecode = identity

    def getdoc(obj):
        return getattr(obj, '__doc__', None)

    def _getoptbwrapper(orig, args, shortlist, namelist):
        return orig(args, shortlist, namelist)

    strkwargs = identity
    byteskwargs = identity

    oslinesep = os.linesep
    osname = os.name
    ospathsep = os.pathsep
    ossep = os.sep
    osaltsep = os.altsep
    stdin = sys.stdin
    stdout = sys.stdout
    stderr = sys.stderr
    if getattr(sys, 'argv', None) is not None:
        sysargv = sys.argv
    sysplatform = sys.platform
    getcwd = os.getcwd
    sysexecutable = sys.executable
    shlexsplit = shlex.split
    stringio = cStringIO.StringIO
    maplist = map
    ziplist = zip
    rawinput = raw_input
+    getargspec = inspect.getargspec

    def emailparser(*args, **kwargs):
        import email.parser
        return email.parser.Parser(*args, **kwargs)

isjython = sysplatform.startswith('java')

isdarwin = sysplatform == 'darwin'
isposix = osname == 'posix'
iswindows = osname == 'nt'

def getoptb(args, shortlist, namelist):
    return _getoptbwrapper(getopt.getopt, args, shortlist, namelist)

def gnugetoptb(args, shortlist, namelist):
    return _getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist)
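
# Example (illustrative, not from this changeset): with the two assignments
# added above, callers can introspect function signatures uniformly instead
# of calling the deprecated inspect.getargspec() directly on Python 3:
#
#     from mercurial import pycompat
#
#     def f(a, b=1, *args, **kwargs):
#         pass
#
#     spec = pycompat.getargspec(f)
#     spec.args      # ['a', 'b'] on both Python 2 and 3
#     spec.varargs   # 'args'
#     spec.defaults  # (1,)
#
# On Python 2 this resolves to inspect.getargspec(); on Python 3 it is
# inspect.getfullargspec(), which additionally exposes keyword-only
# arguments and annotations.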