##// END OF EJS Templates
dirstate: use keyword arguments to clarify walk()'s callers...
Martin von Zweigbergk -
r34344:255c761a default
parent child Browse files
Show More
@@ -1,1491 +1,1492 b''
# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial
from __future__ import absolute_import
import functools
import gc
import os
import random
import struct
import sys
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    mdiff,
    merge,
    revlog,
    util,
)
41
41
# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
66
66
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    """Return True if 'thing' has attribute 'attr' (never raises)."""
    return getattr(thing, attr, _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
74
74
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == 'nt':
    # time.clock gives the best resolution on Windows for old Pythons
    util.timer = time.clock
else:
    util.timer = time.time
84
84
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))
92
92
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
                         ('c', 'changelog', False, ('open changelog')),
                         ('m', 'manifest', False, ('open manifest')),
                         ('', 'dir', False, ('open directory manifest')),
                     ]))

cmdtable = {}
106
106
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like '^commit|ci' into its alias list."""
    return cmd.lstrip("^").split("|")
112
112
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    import inspect
    command = cmdutil.command(cmdtable)
    if 'norepo' not in inspect.getargspec(command)[0]:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += ' %s' % ' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += ' %s' % ' '.join(parsealiases(name))
            return func
        return decorator
141
141
def getlen(ui):
    """Return a length function; a stub that always returns 1 when
    perf.stub is set (so benchmarks don't depend on repo contents)."""
    if ui.configbool("perf", "stub"):
        return lambda x: 1
    return len
146
146
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, "perf", "presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter('perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, 'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool("perf", "stub"):
        return functools.partial(stub_timer, fm), fm
    return functools.partial(_timer, fm), fm
209
209
def stub_timer(fm, func, title=None):
    """Stub replacement for _timer: run 'func' exactly once, report nothing."""
    func()
212
212
def _timer(fm, func, title=None):
    """Repeatedly run 'func' and report the best (wall, user, sys) sample.

    Runs until 3s wall-clock have elapsed with at least 100 iterations, or
    10s have elapsed with at least 3 iterations, whichever comes first.
    Results are written through the formatter 'fm'.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        ostart = os.times()
        cstart = util.timer()
        r = func()
        cstop = util.timer()
        ostop = os.times()
        count += 1
        a, b = ostart, ostop
        # (wall clock, user cpu, system cpu) for this iteration
        results.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    fm.startitem()

    if title:
        fm.write('title', '! %s\n', title)
    if r:
        fm.write('result', '! result: %s\n', r)
    m = min(results)
    fm.plain('!')
    fm.write('wall', ' wall %f', m[0])
    fm.write('comb', ' comb %f', m[1] + m[2])
    fm.write('user', ' user %f', m[1])
    fm.write('sys', ' sys %f', m[2])
    fm.write('count', ' (best of %d)', count)
    fm.plain('\n')
246
246
247 # utilities for historical portability
247 # utilities for historical portability
248
248
def getint(ui, section, name, default):
    """Read an integer config value, falling back to 'default' when unset.

    Raises error.ConfigError if the value is present but not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(("%s.%s is not an integer ('%s')")
                                % (section, name, v))
260
260
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(("missing attribute %s of %s might break assumption"
                           " of performance measurement") % (name, obj))

    origvalue = getattr(obj, name)
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, name, newvalue)
        def restore(self):
            setattr(obj, name, origvalue)

    return attrutil()
290
290
291 # utilities to examine each internal API changes
291 # utilities to examine each internal API changes
292
292
def getbranchmapsubsettable():
    """Locate the 'subsettable' mapping wherever this Mercurial defines it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(("perfbranchmap not available with this Mercurial"),
                      hint="use 2.5 or later")
308
308
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')
319
319
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')
330
330
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(("tags API of this hg command is unknown"))
359
359
360 # utilities to clear cache
360 # utilities to clear cache
361
361
def clearfilecache(repo, attrname):
    """Drop a filecache'd attribute from the unfiltered repo, if present."""
    unfi = repo.unfiltered()
    if attrname in vars(unfi):
        delattr(unfi, attrname)
    unfi._filecache.pop(attrname, None)
367
367
368 # perf commands
368 # perf commands
369
369
@command('perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark a full dirstate walk over the working directory
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # keyword arguments clarify walk()'s boolean parameters (r34344)
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()
376
377
@command('perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file 'f' at the working directory parent
    timer, fm = gettimer(ui, opts)
    fc = repo['.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
383
384
@command('perfstatus',
         [('u', 'unknown', False,
           'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    # benchmark repo.status(); --unknown additionally scans for
    # untracked files, which dominates the cost on large working dirs
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
    fm.end()
394
395
@command('perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark 'hg addremove --dry-run' with output suppressed
    timer, fm = gettimer(ui, opts)
    # capture quiet BEFORE entering try: the original assigned it inside
    # the try block, so a failure there made the finally clause raise
    # NameError instead of the real error
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        timer(lambda: scmutil.addremove(repo, matcher, "", dry_run=True))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
406
407
def clearcaches(cl):
    """Clear a changelog's lookup caches across internal API changes."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, 'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, '_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
416
@command('perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    # benchmark computing changelog head revisions from cold caches
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.headrevs())
        clearcaches(cl)
    timer(d)
    fm.end()
425
426
@command('perftags', formatteropts)
def perftags(ui, repo, **opts):
    # benchmark computing repo tags with changelog/manifest/tags caches
    # rebuilt from scratch on every iteration
    import mercurial.changelog
    import mercurial.manifest
    timer, fm = gettimer(ui, opts)
    svfs = getsvfs(repo)
    repocleartagscache = repocleartagscachefunc(repo)
    def t():
        repo.changelog = mercurial.changelog.changelog(svfs)
        repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)
        repocleartagscache()
        return len(repo.tags())
    timer(t)
    fm.end()
440
441
@command('perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark walking the full ancestor set of all heads
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()
450
451
@command('perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests of 'revset' revisions against the
    # lazy ancestor set of all heads
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()
462
463
@command('perfbookmarks', formatteropts)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    timer, fm = gettimer(ui, opts)
    def d():
        clearfilecache(repo, '_bookmarks')
        repo._bookmarks
    timer(d)
    fm.end()
472
473
@command('perfchangegroupchangelog', formatteropts +
         [('', 'version', '02', 'changegroup version'),
          ('r', 'rev', '', 'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    cl = repo.changelog
    revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
    bundler = changegroup.getbundler(version, repo)

    def lookup(node):
        # The real bundler reads the revision in order to access the
        # manifest node and files list. Do that here.
        cl.read(node)
        return node

    def d():
        for chunk in bundler.group(revs, cl, lookup):
            pass

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
503
504
@command('perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # benchmark computing the dirstate's directory map
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # membership test forces the dirstate to load outside the timed loop
    'a' in dirstate
    def d():
        dirstate.dirs()
        # drop the cached value so the next run recomputes it
        del dirstate._dirs
    timer(d)
    fm.end()
514
515
@command('perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    # benchmark loading the dirstate from disk
    timer, fm = gettimer(ui, opts)
    # prime once so import-time costs don't pollute the first sample
    "a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        "a" in repo.dirstate
    timer(d)
    fm.end()
524
525
@command('perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    # benchmark computing dirstate._dirs (directory membership map)
    timer, fm = gettimer(ui, opts)
    "a" in repo.dirstate
    def d():
        "a" in repo.dirstate._dirs
        # invalidate the cached property between runs
        del repo.dirstate._dirs
    timer(d)
    fm.end()
534
535
@command('perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    # benchmark building the dirstate file-name case-fold map
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    'a' in dirstate
    def d():
        dirstate._filefoldmap.get('a')
        # invalidate the cached map between runs
        del dirstate._filefoldmap
    timer(d)
    fm.end()
545
546
@command('perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    # benchmark building the dirstate directory case-fold map
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    'a' in dirstate
    def d():
        dirstate._dirfoldmap.get('a')
        # _dirfoldmap is derived from _dirs, so drop both between runs
        del dirstate._dirfoldmap
        del dirstate._dirs
    timer(d)
    fm.end()
557
558
@command('perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    # benchmark writing the dirstate back to disk
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    "a" in ds
    def d():
        # force a write even though nothing changed
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()
568
569
@command('perfmergecalculate',
         [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    # benchmark the merge action-calculation phase against REV
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()
586
587
@command('perfpathcopies', [], "REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    # benchmark copy tracing between two revisions
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()
596
597
@command('perfphases',
         [('', 'full', False, 'include file reading time too'),
          ], "")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get('full')
    def d():
        phases = _phases
        if full:
            # also pay the cost of re-reading the phase roots from disk
            clearfilecache(repo, '_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()
614
615
@command('perfmanifest', [], 'REV')
def perfmanifest(ui, repo, rev, **opts):
    # benchmark reading the manifest of REV with cold manifestlog caches
    timer, fm = gettimer(ui, opts)
    ctx = scmutil.revsingle(repo, rev, rev)
    t = ctx.manifestnode()
    def d():
        repo.manifestlog.clearcaches()
        repo.manifestlog[t].read()
    timer(d)
    fm.end()
625
626
@command('perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # benchmark reading one changelog entry
    timer, fm = gettimer(ui, opts)
    n = repo[rev].node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()
635
636
@command('perfindex', formatteropts)
def perfindex(ui, repo, **opts):
    # benchmark constructing the changelog revlog and resolving tip
    import mercurial.revlog
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo["tip"].node()
    svfs = getsvfs(repo)
    def d():
        cl = mercurial.revlog.revlog(svfs, "00changelog.i")
        cl.rev(n)
    timer(d)
    fm.end()
648
649
@command('perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    # benchmark `hg version -q` startup time by re-invoking ourselves
    timer, fm = gettimer(ui, opts)
    cmd = sys.argv[0]
    def d():
        if os.name != 'nt':
            # empty HGRCPATH avoids reading user/system config
            os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
        else:
            # cmd.exe has no inline env-var syntax; set it in our environment
            os.environ['HGRCPATH'] = ' '
            os.system("%s version -q > NUL" % cmd)
    timer(d)
    fm.end()
661
662
@command('perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    # benchmark changelog parent lookups over the first N commits
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, "perf", "parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort("repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()
677
678
@command('perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    # benchmark computing the files() list of changeset X via the context API
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()
686
687
@command('perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    # benchmark reading the files list straight from the changelog entry
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        # index 3 of changelog.read() is the files list
        len(cl.read(x)[3])
    timer(d)
    fm.end()
696
697
@command('perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    # benchmark resolving a revision symbol to a node
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
702
703
@command('perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    # benchmark evaluating one or more revset specs
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
709
710
@command('perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    # benchmark node -> rev resolution in the changelog index
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = repo[rev].node()
    cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i")
    def d():
        cl.rev(n)
        # keep each run cold
        clearcaches(cl)
    timer(d)
    fm.end()
722
723
@command('perflog',
         [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    # benchmark `hg log`, optionally with copy/rename tracing
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # suppress log output so I/O doesn't dominate the measurement
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
                               copies=opts.get('rename')))
    ui.popbuffer()
    fm.end()
734
735
@command('perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in xrange(len(repo), -1, -1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
748
749
@command('perftemplating', formatteropts)
def perftemplating(ui, repo, rev=None, **opts):
    # benchmark `hg log` rendering through the templater
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    # suppress log output so I/O doesn't dominate the measurement
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
                               template='{date|shortdate} [{rev}:{node|short}]'
                               ' {author|person}: {desc|firstline}\n'))
    ui.popbuffer()
    fm.end()
760
761
@command('perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # benchmark constructing a case-collision auditor over the dirstate
    timer, fm = gettimer(ui, opts)
    timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
    fm.end()
766
767
@command('perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # benchmark loading the store's fncache file
    timer, fm = gettimer(ui, opts)
    s = repo.store
    def d():
        s.fncache._load()
    timer(d)
    fm.end()
775
776
@command('perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # benchmark writing the store's fncache inside a real lock + transaction
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    lock = repo.lock()
    tr = repo.transaction('perffncachewrite')
    def d():
        # force a write even though nothing changed
        s.fncache._dirty = True
        s.fncache.write(tr)
    timer(d)
    tr.close()
    lock.release()
    fm.end()
790
791
@command('perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # benchmark path-encoding every entry currently in the fncache
    timer, fm = gettimer(ui, opts)
    s = repo.store
    s.fncache._load()
    def d():
        for p in s.fncache.entries:
            s.encode(p)
    timer(d)
    fm.end()
801
802
@command('perfbdiff', revlogopts + formatteropts + [
    ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
    ('', 'alldata', False, 'test bdiffs for all associated revisions')],
    '-c|-m|FILE REV')
def perfbdiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    if opts['alldata']:
        opts['changelog'] = True

    if opts.get('changelog') or opts.get('manifest'):
        # with -c/-m the positional arg is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('perfbdiff', 'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts['alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
            for pctx in ctx.parents():
                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for pair in textpairs:
            mdiff.textdiff(*pair)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
859
860
@command('perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
        }

    # time the diff once per whitespace-option combination
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((options[c], '1') for c in diffopt)
        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
        timer(d, title)
    fm.end()
879
880
@command('perfrevlogindex', revlogopts + formatteropts,
         '-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    rl = cmdutil.openrevlog(repo, 'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the low 16 bits of the first 4 bytes carry the revlog version
    header = struct.unpack('>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(('unsupported revlog version: %d') % version)

    rllen = len(rl)

    # sample nodes at fixed points through the revlog
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, 'revlog constructor'),
        (read, 'read'),
        (parseindex, 'create index object'),
        (lambda: getentry(0), 'retrieve index entry for rev 0'),
        (lambda: resolvenode('a' * 20), 'look up missing node'),
        (lambda: resolvenode(node0), 'look up node at rev 0'),
        (lambda: resolvenode(node25), 'look up node at 1/4 len'),
        (lambda: resolvenode(node50), 'look up node at 1/2 len'),
        (lambda: resolvenode(node75), 'look up node at 3/4 len'),
        (lambda: resolvenode(node100), 'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes),
         'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2),
         'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev),
         'look up all nodes (reverse)'),
        (lambda: resolvenodes(allnodesrev, 2),
         'look up all nodes 2x (reverse)'),
        (lambda: getentries(allrevs),
         'retrieve all index entries (forward)'),
        (lambda: getentries(allrevs, 2),
         'retrieve all index entries 2x (forward)'),
        (lambda: getentries(allrevsrev),
         'retrieve all index entries (reverse)'),
        (lambda: getentries(allrevsrev, 2),
         'retrieve all index entries 2x (reverse)'),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
995
996
@command('perfrevlogrevisions', revlogopts + formatteropts +
         [('d', 'dist', 100, 'distance between the revisions'),
          ('s', 'startrev', 0, 'revision to start reading at'),
          ('', 'reverse', False, 'read in reverse')],
         '-c|-m|FILE')
def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
                        **opts):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    rl = cmdutil.openrevlog(repo, 'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts['dist']

        if reverse:
            beginrev, endrev = endrev, beginrev
            dist = -1 * dist

        for x in xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1032
1033
1033 @command('perfrevlogchunks', revlogopts + formatteropts +
1034 @command('perfrevlogchunks', revlogopts + formatteropts +
1034 [('e', 'engines', '', 'compression engines to use'),
1035 [('e', 'engines', '', 'compression engines to use'),
1035 ('s', 'startrev', 0, 'revision to start at')],
1036 ('s', 'startrev', 0, 'revision to start at')],
1036 '-c|-m|FILE')
1037 '-c|-m|FILE')
1037 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1038 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1038 """Benchmark operations on revlog chunks.
1039 """Benchmark operations on revlog chunks.
1039
1040
1040 Logically, each revlog is a collection of fulltext revisions. However,
1041 Logically, each revlog is a collection of fulltext revisions. However,
1041 stored within each revlog are "chunks" of possibly compressed data. This
1042 stored within each revlog are "chunks" of possibly compressed data. This
1042 data needs to be read and decompressed or compressed and written.
1043 data needs to be read and decompressed or compressed and written.
1043
1044
1044 This command measures the time it takes to read+decompress and recompress
1045 This command measures the time it takes to read+decompress and recompress
1045 chunks in a revlog. It effectively isolates I/O and compression performance.
1046 chunks in a revlog. It effectively isolates I/O and compression performance.
1046 For measurements of higher-level operations like resolving revisions,
1047 For measurements of higher-level operations like resolving revisions,
1047 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1048 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1048 """
1049 """
1049 rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)
1050 rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)
1050
1051
1051 # _chunkraw was renamed to _getsegmentforrevs.
1052 # _chunkraw was renamed to _getsegmentforrevs.
1052 try:
1053 try:
1053 segmentforrevs = rl._getsegmentforrevs
1054 segmentforrevs = rl._getsegmentforrevs
1054 except AttributeError:
1055 except AttributeError:
1055 segmentforrevs = rl._chunkraw
1056 segmentforrevs = rl._chunkraw
1056
1057
1057 # Verify engines argument.
1058 # Verify engines argument.
1058 if engines:
1059 if engines:
1059 engines = set(e.strip() for e in engines.split(','))
1060 engines = set(e.strip() for e in engines.split(','))
1060 for engine in engines:
1061 for engine in engines:
1061 try:
1062 try:
1062 util.compressionengines[engine]
1063 util.compressionengines[engine]
1063 except KeyError:
1064 except KeyError:
1064 raise error.Abort('unknown compression engine: %s' % engine)
1065 raise error.Abort('unknown compression engine: %s' % engine)
1065 else:
1066 else:
1066 engines = []
1067 engines = []
1067 for e in util.compengines:
1068 for e in util.compengines:
1068 engine = util.compengines[e]
1069 engine = util.compengines[e]
1069 try:
1070 try:
1070 if engine.available():
1071 if engine.available():
1071 engine.revlogcompressor().compress('dummy')
1072 engine.revlogcompressor().compress('dummy')
1072 engines.append(e)
1073 engines.append(e)
1073 except NotImplementedError:
1074 except NotImplementedError:
1074 pass
1075 pass
1075
1076
1076 revs = list(rl.revs(startrev, len(rl) - 1))
1077 revs = list(rl.revs(startrev, len(rl) - 1))
1077
1078
1078 def rlfh(rl):
1079 def rlfh(rl):
1079 if rl._inline:
1080 if rl._inline:
1080 return getsvfs(repo)(rl.indexfile)
1081 return getsvfs(repo)(rl.indexfile)
1081 else:
1082 else:
1082 return getsvfs(repo)(rl.datafile)
1083 return getsvfs(repo)(rl.datafile)
1083
1084
1084 def doread():
1085 def doread():
1085 rl.clearcaches()
1086 rl.clearcaches()
1086 for rev in revs:
1087 for rev in revs:
1087 segmentforrevs(rev, rev)
1088 segmentforrevs(rev, rev)
1088
1089
1089 def doreadcachedfh():
1090 def doreadcachedfh():
1090 rl.clearcaches()
1091 rl.clearcaches()
1091 fh = rlfh(rl)
1092 fh = rlfh(rl)
1092 for rev in revs:
1093 for rev in revs:
1093 segmentforrevs(rev, rev, df=fh)
1094 segmentforrevs(rev, rev, df=fh)
1094
1095
1095 def doreadbatch():
1096 def doreadbatch():
1096 rl.clearcaches()
1097 rl.clearcaches()
1097 segmentforrevs(revs[0], revs[-1])
1098 segmentforrevs(revs[0], revs[-1])
1098
1099
1099 def doreadbatchcachedfh():
1100 def doreadbatchcachedfh():
1100 rl.clearcaches()
1101 rl.clearcaches()
1101 fh = rlfh(rl)
1102 fh = rlfh(rl)
1102 segmentforrevs(revs[0], revs[-1], df=fh)
1103 segmentforrevs(revs[0], revs[-1], df=fh)
1103
1104
1104 def dochunk():
1105 def dochunk():
1105 rl.clearcaches()
1106 rl.clearcaches()
1106 fh = rlfh(rl)
1107 fh = rlfh(rl)
1107 for rev in revs:
1108 for rev in revs:
1108 rl._chunk(rev, df=fh)
1109 rl._chunk(rev, df=fh)
1109
1110
1110 chunks = [None]
1111 chunks = [None]
1111
1112
1112 def dochunkbatch():
1113 def dochunkbatch():
1113 rl.clearcaches()
1114 rl.clearcaches()
1114 fh = rlfh(rl)
1115 fh = rlfh(rl)
1115 # Save chunks as a side-effect.
1116 # Save chunks as a side-effect.
1116 chunks[0] = rl._chunks(revs, df=fh)
1117 chunks[0] = rl._chunks(revs, df=fh)
1117
1118
1118 def docompress(compressor):
1119 def docompress(compressor):
1119 rl.clearcaches()
1120 rl.clearcaches()
1120
1121
1121 try:
1122 try:
1122 # Swap in the requested compression engine.
1123 # Swap in the requested compression engine.
1123 oldcompressor = rl._compressor
1124 oldcompressor = rl._compressor
1124 rl._compressor = compressor
1125 rl._compressor = compressor
1125 for chunk in chunks[0]:
1126 for chunk in chunks[0]:
1126 rl.compress(chunk)
1127 rl.compress(chunk)
1127 finally:
1128 finally:
1128 rl._compressor = oldcompressor
1129 rl._compressor = oldcompressor
1129
1130
1130 benches = [
1131 benches = [
1131 (lambda: doread(), 'read'),
1132 (lambda: doread(), 'read'),
1132 (lambda: doreadcachedfh(), 'read w/ reused fd'),
1133 (lambda: doreadcachedfh(), 'read w/ reused fd'),
1133 (lambda: doreadbatch(), 'read batch'),
1134 (lambda: doreadbatch(), 'read batch'),
1134 (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
1135 (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
1135 (lambda: dochunk(), 'chunk'),
1136 (lambda: dochunk(), 'chunk'),
1136 (lambda: dochunkbatch(), 'chunk batch'),
1137 (lambda: dochunkbatch(), 'chunk batch'),
1137 ]
1138 ]
1138
1139
1139 for engine in sorted(engines):
1140 for engine in sorted(engines):
1140 compressor = util.compengines[engine].revlogcompressor()
1141 compressor = util.compengines[engine].revlogcompressor()
1141 benches.append((functools.partial(docompress, compressor),
1142 benches.append((functools.partial(docompress, compressor),
1142 'compress w/ %s' % engine))
1143 'compress w/ %s' % engine))
1143
1144
1144 for fn, title in benches:
1145 for fn, title in benches:
1145 timer, fm = gettimer(ui, opts)
1146 timer, fm = gettimer(ui, opts)
1146 timer(fn, title=title)
1147 timer(fn, title=title)
1147 fm.end()
1148 fm.end()
1148
1149
1149 @command('perfrevlogrevision', revlogopts + formatteropts +
1150 @command('perfrevlogrevision', revlogopts + formatteropts +
1150 [('', 'cache', False, 'use caches instead of clearing')],
1151 [('', 'cache', False, 'use caches instead of clearing')],
1151 '-c|-m|FILE REV')
1152 '-c|-m|FILE REV')
1152 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1153 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1153 """Benchmark obtaining a revlog revision.
1154 """Benchmark obtaining a revlog revision.
1154
1155
1155 Obtaining a revlog revision consists of roughly the following steps:
1156 Obtaining a revlog revision consists of roughly the following steps:
1156
1157
1157 1. Compute the delta chain
1158 1. Compute the delta chain
1158 2. Obtain the raw chunks for that delta chain
1159 2. Obtain the raw chunks for that delta chain
1159 3. Decompress each raw chunk
1160 3. Decompress each raw chunk
1160 4. Apply binary patches to obtain fulltext
1161 4. Apply binary patches to obtain fulltext
1161 5. Verify hash of fulltext
1162 5. Verify hash of fulltext
1162
1163
1163 This command measures the time spent in each of these phases.
1164 This command measures the time spent in each of these phases.
1164 """
1165 """
1165 if opts.get('changelog') or opts.get('manifest'):
1166 if opts.get('changelog') or opts.get('manifest'):
1166 file_, rev = None, file_
1167 file_, rev = None, file_
1167 elif rev is None:
1168 elif rev is None:
1168 raise error.CommandError('perfrevlogrevision', 'invalid arguments')
1169 raise error.CommandError('perfrevlogrevision', 'invalid arguments')
1169
1170
1170 r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)
1171 r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)
1171
1172
1172 # _chunkraw was renamed to _getsegmentforrevs.
1173 # _chunkraw was renamed to _getsegmentforrevs.
1173 try:
1174 try:
1174 segmentforrevs = r._getsegmentforrevs
1175 segmentforrevs = r._getsegmentforrevs
1175 except AttributeError:
1176 except AttributeError:
1176 segmentforrevs = r._chunkraw
1177 segmentforrevs = r._chunkraw
1177
1178
1178 node = r.lookup(rev)
1179 node = r.lookup(rev)
1179 rev = r.rev(node)
1180 rev = r.rev(node)
1180
1181
1181 def getrawchunks(data, chain):
1182 def getrawchunks(data, chain):
1182 start = r.start
1183 start = r.start
1183 length = r.length
1184 length = r.length
1184 inline = r._inline
1185 inline = r._inline
1185 iosize = r._io.size
1186 iosize = r._io.size
1186 buffer = util.buffer
1187 buffer = util.buffer
1187 offset = start(chain[0])
1188 offset = start(chain[0])
1188
1189
1189 chunks = []
1190 chunks = []
1190 ladd = chunks.append
1191 ladd = chunks.append
1191
1192
1192 for rev in chain:
1193 for rev in chain:
1193 chunkstart = start(rev)
1194 chunkstart = start(rev)
1194 if inline:
1195 if inline:
1195 chunkstart += (rev + 1) * iosize
1196 chunkstart += (rev + 1) * iosize
1196 chunklength = length(rev)
1197 chunklength = length(rev)
1197 ladd(buffer(data, chunkstart - offset, chunklength))
1198 ladd(buffer(data, chunkstart - offset, chunklength))
1198
1199
1199 return chunks
1200 return chunks
1200
1201
1201 def dodeltachain(rev):
1202 def dodeltachain(rev):
1202 if not cache:
1203 if not cache:
1203 r.clearcaches()
1204 r.clearcaches()
1204 r._deltachain(rev)
1205 r._deltachain(rev)
1205
1206
1206 def doread(chain):
1207 def doread(chain):
1207 if not cache:
1208 if not cache:
1208 r.clearcaches()
1209 r.clearcaches()
1209 segmentforrevs(chain[0], chain[-1])
1210 segmentforrevs(chain[0], chain[-1])
1210
1211
1211 def dorawchunks(data, chain):
1212 def dorawchunks(data, chain):
1212 if not cache:
1213 if not cache:
1213 r.clearcaches()
1214 r.clearcaches()
1214 getrawchunks(data, chain)
1215 getrawchunks(data, chain)
1215
1216
1216 def dodecompress(chunks):
1217 def dodecompress(chunks):
1217 decomp = r.decompress
1218 decomp = r.decompress
1218 for chunk in chunks:
1219 for chunk in chunks:
1219 decomp(chunk)
1220 decomp(chunk)
1220
1221
1221 def dopatch(text, bins):
1222 def dopatch(text, bins):
1222 if not cache:
1223 if not cache:
1223 r.clearcaches()
1224 r.clearcaches()
1224 mdiff.patches(text, bins)
1225 mdiff.patches(text, bins)
1225
1226
1226 def dohash(text):
1227 def dohash(text):
1227 if not cache:
1228 if not cache:
1228 r.clearcaches()
1229 r.clearcaches()
1229 r.checkhash(text, node, rev=rev)
1230 r.checkhash(text, node, rev=rev)
1230
1231
1231 def dorevision():
1232 def dorevision():
1232 if not cache:
1233 if not cache:
1233 r.clearcaches()
1234 r.clearcaches()
1234 r.revision(node)
1235 r.revision(node)
1235
1236
1236 chain = r._deltachain(rev)[0]
1237 chain = r._deltachain(rev)[0]
1237 data = segmentforrevs(chain[0], chain[-1])[1]
1238 data = segmentforrevs(chain[0], chain[-1])[1]
1238 rawchunks = getrawchunks(data, chain)
1239 rawchunks = getrawchunks(data, chain)
1239 bins = r._chunks(chain)
1240 bins = r._chunks(chain)
1240 text = str(bins[0])
1241 text = str(bins[0])
1241 bins = bins[1:]
1242 bins = bins[1:]
1242 text = mdiff.patches(text, bins)
1243 text = mdiff.patches(text, bins)
1243
1244
1244 benches = [
1245 benches = [
1245 (lambda: dorevision(), 'full'),
1246 (lambda: dorevision(), 'full'),
1246 (lambda: dodeltachain(rev), 'deltachain'),
1247 (lambda: dodeltachain(rev), 'deltachain'),
1247 (lambda: doread(chain), 'read'),
1248 (lambda: doread(chain), 'read'),
1248 (lambda: dorawchunks(data, chain), 'rawchunks'),
1249 (lambda: dorawchunks(data, chain), 'rawchunks'),
1249 (lambda: dodecompress(rawchunks), 'decompress'),
1250 (lambda: dodecompress(rawchunks), 'decompress'),
1250 (lambda: dopatch(text, bins), 'patch'),
1251 (lambda: dopatch(text, bins), 'patch'),
1251 (lambda: dohash(text), 'hash'),
1252 (lambda: dohash(text), 'hash'),
1252 ]
1253 ]
1253
1254
1254 for fn, title in benches:
1255 for fn, title in benches:
1255 timer, fm = gettimer(ui, opts)
1256 timer, fm = gettimer(ui, opts)
1256 timer(fn, title=title)
1257 timer(fn, title=title)
1257 fm.end()
1258 fm.end()
1258
1259
1259 @command('perfrevset',
1260 @command('perfrevset',
1260 [('C', 'clear', False, 'clear volatile cache between each call.'),
1261 [('C', 'clear', False, 'clear volatile cache between each call.'),
1261 ('', 'contexts', False, 'obtain changectx for each revision')]
1262 ('', 'contexts', False, 'obtain changectx for each revision')]
1262 + formatteropts, "REVSET")
1263 + formatteropts, "REVSET")
1263 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
1264 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
1264 """benchmark the execution time of a revset
1265 """benchmark the execution time of a revset
1265
1266
1266 Use the --clean option if need to evaluate the impact of build volatile
1267 Use the --clean option if need to evaluate the impact of build volatile
1267 revisions set cache on the revset execution. Volatile cache hold filtered
1268 revisions set cache on the revset execution. Volatile cache hold filtered
1268 and obsolete related cache."""
1269 and obsolete related cache."""
1269 timer, fm = gettimer(ui, opts)
1270 timer, fm = gettimer(ui, opts)
1270 def d():
1271 def d():
1271 if clear:
1272 if clear:
1272 repo.invalidatevolatilesets()
1273 repo.invalidatevolatilesets()
1273 if contexts:
1274 if contexts:
1274 for ctx in repo.set(expr): pass
1275 for ctx in repo.set(expr): pass
1275 else:
1276 else:
1276 for r in repo.revs(expr): pass
1277 for r in repo.revs(expr): pass
1277 timer(d)
1278 timer(d)
1278 fm.end()
1279 fm.end()
1279
1280
1280 @command('perfvolatilesets',
1281 @command('perfvolatilesets',
1281 [('', 'clear-obsstore', False, 'drop obsstore between each call.'),
1282 [('', 'clear-obsstore', False, 'drop obsstore between each call.'),
1282 ] + formatteropts)
1283 ] + formatteropts)
1283 def perfvolatilesets(ui, repo, *names, **opts):
1284 def perfvolatilesets(ui, repo, *names, **opts):
1284 """benchmark the computation of various volatile set
1285 """benchmark the computation of various volatile set
1285
1286
1286 Volatile set computes element related to filtering and obsolescence."""
1287 Volatile set computes element related to filtering and obsolescence."""
1287 timer, fm = gettimer(ui, opts)
1288 timer, fm = gettimer(ui, opts)
1288 repo = repo.unfiltered()
1289 repo = repo.unfiltered()
1289
1290
1290 def getobs(name):
1291 def getobs(name):
1291 def d():
1292 def d():
1292 repo.invalidatevolatilesets()
1293 repo.invalidatevolatilesets()
1293 if opts['clear_obsstore']:
1294 if opts['clear_obsstore']:
1294 clearfilecache(repo, 'obsstore')
1295 clearfilecache(repo, 'obsstore')
1295 obsolete.getrevs(repo, name)
1296 obsolete.getrevs(repo, name)
1296 return d
1297 return d
1297
1298
1298 allobs = sorted(obsolete.cachefuncs)
1299 allobs = sorted(obsolete.cachefuncs)
1299 if names:
1300 if names:
1300 allobs = [n for n in allobs if n in names]
1301 allobs = [n for n in allobs if n in names]
1301
1302
1302 for name in allobs:
1303 for name in allobs:
1303 timer(getobs(name), title=name)
1304 timer(getobs(name), title=name)
1304
1305
1305 def getfiltered(name):
1306 def getfiltered(name):
1306 def d():
1307 def d():
1307 repo.invalidatevolatilesets()
1308 repo.invalidatevolatilesets()
1308 if opts['clear_obsstore']:
1309 if opts['clear_obsstore']:
1309 clearfilecache(repo, 'obsstore')
1310 clearfilecache(repo, 'obsstore')
1310 repoview.filterrevs(repo, name)
1311 repoview.filterrevs(repo, name)
1311 return d
1312 return d
1312
1313
1313 allfilter = sorted(repoview.filtertable)
1314 allfilter = sorted(repoview.filtertable)
1314 if names:
1315 if names:
1315 allfilter = [n for n in allfilter if n in names]
1316 allfilter = [n for n in allfilter if n in names]
1316
1317
1317 for name in allfilter:
1318 for name in allfilter:
1318 timer(getfiltered(name), title=name)
1319 timer(getfiltered(name), title=name)
1319 fm.end()
1320 fm.end()
1320
1321
1321 @command('perfbranchmap',
1322 @command('perfbranchmap',
1322 [('f', 'full', False,
1323 [('f', 'full', False,
1323 'Includes build time of subset'),
1324 'Includes build time of subset'),
1324 ('', 'clear-revbranch', False,
1325 ('', 'clear-revbranch', False,
1325 'purge the revbranch cache between computation'),
1326 'purge the revbranch cache between computation'),
1326 ] + formatteropts)
1327 ] + formatteropts)
1327 def perfbranchmap(ui, repo, full=False, clear_revbranch=False, **opts):
1328 def perfbranchmap(ui, repo, full=False, clear_revbranch=False, **opts):
1328 """benchmark the update of a branchmap
1329 """benchmark the update of a branchmap
1329
1330
1330 This benchmarks the full repo.branchmap() call with read and write disabled
1331 This benchmarks the full repo.branchmap() call with read and write disabled
1331 """
1332 """
1332 timer, fm = gettimer(ui, opts)
1333 timer, fm = gettimer(ui, opts)
1333 def getbranchmap(filtername):
1334 def getbranchmap(filtername):
1334 """generate a benchmark function for the filtername"""
1335 """generate a benchmark function for the filtername"""
1335 if filtername is None:
1336 if filtername is None:
1336 view = repo
1337 view = repo
1337 else:
1338 else:
1338 view = repo.filtered(filtername)
1339 view = repo.filtered(filtername)
1339 def d():
1340 def d():
1340 if clear_revbranch:
1341 if clear_revbranch:
1341 repo.revbranchcache()._clear()
1342 repo.revbranchcache()._clear()
1342 if full:
1343 if full:
1343 view._branchcaches.clear()
1344 view._branchcaches.clear()
1344 else:
1345 else:
1345 view._branchcaches.pop(filtername, None)
1346 view._branchcaches.pop(filtername, None)
1346 view.branchmap()
1347 view.branchmap()
1347 return d
1348 return d
1348 # add filter in smaller subset to bigger subset
1349 # add filter in smaller subset to bigger subset
1349 possiblefilters = set(repoview.filtertable)
1350 possiblefilters = set(repoview.filtertable)
1350 subsettable = getbranchmapsubsettable()
1351 subsettable = getbranchmapsubsettable()
1351 allfilters = []
1352 allfilters = []
1352 while possiblefilters:
1353 while possiblefilters:
1353 for name in possiblefilters:
1354 for name in possiblefilters:
1354 subset = subsettable.get(name)
1355 subset = subsettable.get(name)
1355 if subset not in possiblefilters:
1356 if subset not in possiblefilters:
1356 break
1357 break
1357 else:
1358 else:
1358 assert False, 'subset cycle %s!' % possiblefilters
1359 assert False, 'subset cycle %s!' % possiblefilters
1359 allfilters.append(name)
1360 allfilters.append(name)
1360 possiblefilters.remove(name)
1361 possiblefilters.remove(name)
1361
1362
1362 # warm the cache
1363 # warm the cache
1363 if not full:
1364 if not full:
1364 for name in allfilters:
1365 for name in allfilters:
1365 repo.filtered(name).branchmap()
1366 repo.filtered(name).branchmap()
1366 # add unfiltered
1367 # add unfiltered
1367 allfilters.append(None)
1368 allfilters.append(None)
1368
1369
1369 branchcacheread = safeattrsetter(branchmap, 'read')
1370 branchcacheread = safeattrsetter(branchmap, 'read')
1370 branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
1371 branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
1371 branchcacheread.set(lambda repo: None)
1372 branchcacheread.set(lambda repo: None)
1372 branchcachewrite.set(lambda bc, repo: None)
1373 branchcachewrite.set(lambda bc, repo: None)
1373 try:
1374 try:
1374 for name in allfilters:
1375 for name in allfilters:
1375 timer(getbranchmap(name), title=str(name))
1376 timer(getbranchmap(name), title=str(name))
1376 finally:
1377 finally:
1377 branchcacheread.restore()
1378 branchcacheread.restore()
1378 branchcachewrite.restore()
1379 branchcachewrite.restore()
1379 fm.end()
1380 fm.end()
1380
1381
1381 @command('perfloadmarkers')
1382 @command('perfloadmarkers')
1382 def perfloadmarkers(ui, repo):
1383 def perfloadmarkers(ui, repo):
1383 """benchmark the time to parse the on-disk markers for a repo
1384 """benchmark the time to parse the on-disk markers for a repo
1384
1385
1385 Result is the number of markers in the repo."""
1386 Result is the number of markers in the repo."""
1386 timer, fm = gettimer(ui)
1387 timer, fm = gettimer(ui)
1387 svfs = getsvfs(repo)
1388 svfs = getsvfs(repo)
1388 timer(lambda: len(obsolete.obsstore(svfs)))
1389 timer(lambda: len(obsolete.obsstore(svfs)))
1389 fm.end()
1390 fm.end()
1390
1391
1391 @command('perflrucachedict', formatteropts +
1392 @command('perflrucachedict', formatteropts +
1392 [('', 'size', 4, 'size of cache'),
1393 [('', 'size', 4, 'size of cache'),
1393 ('', 'gets', 10000, 'number of key lookups'),
1394 ('', 'gets', 10000, 'number of key lookups'),
1394 ('', 'sets', 10000, 'number of key sets'),
1395 ('', 'sets', 10000, 'number of key sets'),
1395 ('', 'mixed', 10000, 'number of mixed mode operations'),
1396 ('', 'mixed', 10000, 'number of mixed mode operations'),
1396 ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
1397 ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
1397 norepo=True)
1398 norepo=True)
1398 def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
1399 def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
1399 mixedgetfreq=50, **opts):
1400 mixedgetfreq=50, **opts):
1400 def doinit():
1401 def doinit():
1401 for i in xrange(10000):
1402 for i in xrange(10000):
1402 util.lrucachedict(size)
1403 util.lrucachedict(size)
1403
1404
1404 values = []
1405 values = []
1405 for i in xrange(size):
1406 for i in xrange(size):
1406 values.append(random.randint(0, sys.maxint))
1407 values.append(random.randint(0, sys.maxint))
1407
1408
1408 # Get mode fills the cache and tests raw lookup performance with no
1409 # Get mode fills the cache and tests raw lookup performance with no
1409 # eviction.
1410 # eviction.
1410 getseq = []
1411 getseq = []
1411 for i in xrange(gets):
1412 for i in xrange(gets):
1412 getseq.append(random.choice(values))
1413 getseq.append(random.choice(values))
1413
1414
1414 def dogets():
1415 def dogets():
1415 d = util.lrucachedict(size)
1416 d = util.lrucachedict(size)
1416 for v in values:
1417 for v in values:
1417 d[v] = v
1418 d[v] = v
1418 for key in getseq:
1419 for key in getseq:
1419 value = d[key]
1420 value = d[key]
1420 value # silence pyflakes warning
1421 value # silence pyflakes warning
1421
1422
1422 # Set mode tests insertion speed with cache eviction.
1423 # Set mode tests insertion speed with cache eviction.
1423 setseq = []
1424 setseq = []
1424 for i in xrange(sets):
1425 for i in xrange(sets):
1425 setseq.append(random.randint(0, sys.maxint))
1426 setseq.append(random.randint(0, sys.maxint))
1426
1427
1427 def dosets():
1428 def dosets():
1428 d = util.lrucachedict(size)
1429 d = util.lrucachedict(size)
1429 for v in setseq:
1430 for v in setseq:
1430 d[v] = v
1431 d[v] = v
1431
1432
1432 # Mixed mode randomly performs gets and sets with eviction.
1433 # Mixed mode randomly performs gets and sets with eviction.
1433 mixedops = []
1434 mixedops = []
1434 for i in xrange(mixed):
1435 for i in xrange(mixed):
1435 r = random.randint(0, 100)
1436 r = random.randint(0, 100)
1436 if r < mixedgetfreq:
1437 if r < mixedgetfreq:
1437 op = 0
1438 op = 0
1438 else:
1439 else:
1439 op = 1
1440 op = 1
1440
1441
1441 mixedops.append((op, random.randint(0, size * 2)))
1442 mixedops.append((op, random.randint(0, size * 2)))
1442
1443
1443 def domixed():
1444 def domixed():
1444 d = util.lrucachedict(size)
1445 d = util.lrucachedict(size)
1445
1446
1446 for op, v in mixedops:
1447 for op, v in mixedops:
1447 if op == 0:
1448 if op == 0:
1448 try:
1449 try:
1449 d[v]
1450 d[v]
1450 except KeyError:
1451 except KeyError:
1451 pass
1452 pass
1452 else:
1453 else:
1453 d[v] = v
1454 d[v] = v
1454
1455
1455 benches = [
1456 benches = [
1456 (doinit, 'init'),
1457 (doinit, 'init'),
1457 (dogets, 'gets'),
1458 (dogets, 'gets'),
1458 (dosets, 'sets'),
1459 (dosets, 'sets'),
1459 (domixed, 'mixed')
1460 (domixed, 'mixed')
1460 ]
1461 ]
1461
1462
1462 for fn, title in benches:
1463 for fn, title in benches:
1463 timer, fm = gettimer(ui, opts)
1464 timer, fm = gettimer(ui, opts)
1464 timer(fn, title=title)
1465 timer(fn, title=title)
1465 fm.end()
1466 fm.end()
1466
1467
1467 @command('perfwrite', formatteropts)
1468 @command('perfwrite', formatteropts)
1468 def perfwrite(ui, repo, **opts):
1469 def perfwrite(ui, repo, **opts):
1469 """microbenchmark ui.write
1470 """microbenchmark ui.write
1470 """
1471 """
1471 timer, fm = gettimer(ui, opts)
1472 timer, fm = gettimer(ui, opts)
1472 def write():
1473 def write():
1473 for i in range(100000):
1474 for i in range(100000):
1474 ui.write(('Testing write performance\n'))
1475 ui.write(('Testing write performance\n'))
1475 timer(write)
1476 timer(write)
1476 fm.end()
1477 fm.end()
1477
1478
1478 def uisetup(ui):
1479 def uisetup(ui):
1479 if (util.safehasattr(cmdutil, 'openrevlog') and
1480 if (util.safehasattr(cmdutil, 'openrevlog') and
1480 not util.safehasattr(commands, 'debugrevlogopts')):
1481 not util.safehasattr(commands, 'debugrevlogopts')):
1481 # for "historical portability":
1482 # for "historical portability":
1482 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
1483 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
1483 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
1484 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
1484 # openrevlog() should cause failure, because it has been
1485 # openrevlog() should cause failure, because it has been
1485 # available since 3.5 (or 49c583ca48c4).
1486 # available since 3.5 (or 49c583ca48c4).
1486 def openrevlog(orig, repo, cmd, file_, opts):
1487 def openrevlog(orig, repo, cmd, file_, opts):
1487 if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
1488 if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
1488 raise error.Abort("This version doesn't support --dir option",
1489 raise error.Abort("This version doesn't support --dir option",
1489 hint="use 3.5 or later")
1490 hint="use 3.5 or later")
1490 return orig(repo, cmd, file_, opts)
1491 return orig(repo, cmd, file_, opts)
1491 extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)
1492 extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)
@@ -1,670 +1,673 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import platform
15 import platform
16 import stat
16 import stat
17
17
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 from mercurial import (
20 from mercurial import (
21 dirstate,
21 dirstate,
22 encoding,
22 encoding,
23 error,
23 error,
24 httpconnection,
24 httpconnection,
25 match as matchmod,
25 match as matchmod,
26 node,
26 node,
27 pycompat,
27 pycompat,
28 scmutil,
28 scmutil,
29 sparse,
29 sparse,
30 util,
30 util,
31 vfs as vfsmod,
31 vfs as vfsmod,
32 )
32 )
33
33
# directory (relative to the repo root) that holds largefile standins,
# plus its slash-suffixed form for cheap prefix tests
shortname = '.hglf'
shortnameslash = shortname + '/'
# name shared by the config section, the store directory and the user cache
longname = 'largefiles'
37
37
38 # -- Private worker functions ------------------------------------------
38 # -- Private worker functions ------------------------------------------
39
39
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size as a float.

    An explicit *opt* value wins; otherwise, when *assumelfiles* is set,
    fall back to the 'largefiles.minsize' config value (or *default*).
    Aborts when the value is not numeric, or when no value is available.
    '''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
53
53
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlinking failed (e.g. cross-device); fall back to an atomic copy
        with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf:
            for data in util.filechunkiter(srcf):
                dstf.write(data)
        # preserve the source's permission bits on the copy
        os.chmod(dest, os.stat(src).st_mode)
65
65
def usercachepath(ui, hash):
    '''Return the location in the "global" (per-user) largefiles cache for
    a file with the given hash.

    This cache is shared across repositories, both to preserve download
    bandwidth and storage space.'''
    return os.path.join(_usercachedir(ui), hash)
72
72
def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.'''
    # an explicitly configured location always wins
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return configured
    if pycompat.osname == 'nt':
        appdata = encoding.environ.get('LOCALAPPDATA',
                                       encoding.environ.get('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif pycompat.osname == 'posix':
        # honor the XDG base-directory spec before falling back to ~/.cache
        xdgcache = encoding.environ.get('XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, longname)
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n')
                          % pycompat.osname)
    # known OS, but no usable environment variable was found
    raise error.Abort(_('unknown %s usercache location') % longname)
98
98
def inusercache(ui, hash):
    '''Report whether the file for `hash` exists in the per-user cache.'''
    return os.path.exists(usercachepath(ui, hash))
102
102
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        storedpath = storepath(repo, hash)
        # populate the repo store from the user cache
        link(usercachepath(repo.ui, hash), storedpath)
        return storedpath
    return None
117
117
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass used to track largefiles.

    Every path-taking operation normalizes its argument with unixpath()
    before delegating, because the dirstate always works with
    slash-separated, normalized paths.
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
140
140
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate,
                                    lambda: sparse.matcher(repo))

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, subrepos=[], unknown=False,
                                      ignored=False)
        if standins:
            vfs.makedirs(lfstoredir)
        for fstandin in standins:
            lfdirstate.normallookup(splitstandin(fstandin))
    return lfdirstate
167
168
def lfdirstatestatus(lfdirstate, repo):
    '''Resolve 'unsure' entries of lfdirstate against the working parent
    and return the resulting status object.

    Files whose content no longer matches their standin are classified as
    modified; the rest are marked clean (and normalized in lfdirstate).
    '''
    pctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        # no standin in the parent, or content hash mismatch => modified
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
184
185
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # in the working copy (rev is None), skip unknown files
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
196
197
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
200
201
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.

    For shared repositories the store lives in the share source, unless
    *forcelocal* requests the local store.'''
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)
207
208
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is
    returned.  The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    primarypath = storepath(repo, hash, False)
    if instore(repo, hash):
        return (primarypath, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)
    return (primarypath, False)
224
225
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        # cached data is corrupt: warn, remove the bad copy and report failure
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True
247
248
def copytostore(repo, ctx, file, fstandin):
    '''Copy the largefile for *file* (whose standin is *fstandin* in *ctx*)
    into the store, unless it is already there or missing locally.'''
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
258
259
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        # only act on standins that actually exist in this revision
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)
267
268
def copytostoreabsolute(repo, file, hash):
    '''Copy the file at the absolute path *file* into the store under *hash*,
    hardlinking from the user cache when possible, then link the store copy
    back into the user cache.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, 'rb') as srcf:
            with util.atomictempfile(storepath(repo, hash),
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
279
280
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
285
286
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # translate the caller's patterns into the standin directory
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
    else:
        # no patterns: relative to repo root
        pats = [wvfs.join(standindir)]
    return scmutil.match(repo[None], pats, badfn=badfn)
303
304
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandinfn = smatcher.matchfn
    def composedmatchfn(f):
        # a path matches when it is a standin AND its largefile matches
        return isstandinfn(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn
    return smatcher
315
316
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
327
328
def isstandin(filename):
    '''Return true if filename is a big file standin.  filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)
332
333
def splitstandin(filename):
    '''Return the largefile path for a standin path, or None if *filename*
    is not a standin.

    Split on / because that's what dirstate always uses, even on Windows.
    Change local separator to / first just in case we are passed filenames
    from an external source (like the command line).
    '''
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
342
343
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_('%s: file not found!') % lfile)
    hash = hashfile(file)
    executable = getexecutable(file)
    writestandin(repo, standin, hash, executable)
355
356
def readasstandin(fctx):
    '''read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer.'''
    data = fctx.data()
    return data.strip()
361
362
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
365
366
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    # hashlib.sha1() with no argument is equivalent to sha1('') on
    # Python 2, and unlike the latter it also works on Python 3
    # (where the constructor rejects str).
    hasher = hashlib.sha1()
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()
374
375
def hashfile(file):
    '''Return the hex SHA-1 hash of the named file, or '' when the file
    does not exist.'''
    if not os.path.exists(file):
        return ''
    with open(file, 'rb') as fileobj:
        return hexsha1(fileobj)
380
381
def getexecutable(filename):
    '''Return a truthy value only when the user, group AND other execute
    bits are all set on *filename*.'''
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))
386
387
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring exactly one slash between
    each adjacent pair.'''
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for component in arg:
        url = join(url, component)
    return url
399
400
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = hashlib.sha1()
    for chunk in util.filechunkiter(fileobj):
        hasher.update(chunk)
    return hasher.hexdigest()
407
408
def httpsendfile(ui, filename):
    '''Return an httpsendfile object for uploading *filename* (binary).'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
410
411
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
414
415
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if ('largefiles' in repo.requirements and
        any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True
    # fall back: any entry in the largefiles dirstate counts
    return any(openlfdirstate(repo.ui, repo, False))
422
423
class storeprotonotcapable(Exception):
    '''Raised when no store type in *storetypes* is supported.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
426
427
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin in the
    working directory; hash is None when the standin cannot be read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for fstandin in repo.dirstate.walk(matcher, subrepos=[], unknown=False,
                                       ignored=False):
        lfile = splitstandin(fstandin)
        try:
            hash = readasstandin(wctx[fstandin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
439
441
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Propagate the dirstate entry of lfile's standin into lfdirstate.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        entry = repo.dirstate._map[lfstandin]
        state, mtime = entry[0], entry[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
462
464
def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"
    lfdirstate = openlfdirstate(repo.ui, repo)
    for fname in ctx.files():
        lfile = splitstandin(fname)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files comming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
491
493
492 def getlfilestoupdate(oldstandins, newstandins):
494 def getlfilestoupdate(oldstandins, newstandins):
493 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
495 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
494 filelist = []
496 filelist = []
495 for f in changedstandins:
497 for f in changedstandins:
496 if f[0] not in filelist:
498 if f[0] not in filelist:
497 filelist.append(f[0])
499 filelist.append(f[0])
498 return filelist
500 return filelist
499
501
500 def getlfilestoupload(repo, missing, addfunc):
502 def getlfilestoupload(repo, missing, addfunc):
501 for i, n in enumerate(missing):
503 for i, n in enumerate(missing):
502 repo.ui.progress(_('finding outgoing largefiles'), i,
504 repo.ui.progress(_('finding outgoing largefiles'), i,
503 unit=_('revisions'), total=len(missing))
505 unit=_('revisions'), total=len(missing))
504 parents = [p for p in repo[n].parents() if p != node.nullid]
506 parents = [p for p in repo[n].parents() if p != node.nullid]
505
507
506 oldlfstatus = repo.lfstatus
508 oldlfstatus = repo.lfstatus
507 repo.lfstatus = False
509 repo.lfstatus = False
508 try:
510 try:
509 ctx = repo[n]
511 ctx = repo[n]
510 finally:
512 finally:
511 repo.lfstatus = oldlfstatus
513 repo.lfstatus = oldlfstatus
512
514
513 files = set(ctx.files())
515 files = set(ctx.files())
514 if len(parents) == 2:
516 if len(parents) == 2:
515 mc = ctx.manifest()
517 mc = ctx.manifest()
516 mp1 = ctx.parents()[0].manifest()
518 mp1 = ctx.parents()[0].manifest()
517 mp2 = ctx.parents()[1].manifest()
519 mp2 = ctx.parents()[1].manifest()
518 for f in mp1:
520 for f in mp1:
519 if f not in mc:
521 if f not in mc:
520 files.add(f)
522 files.add(f)
521 for f in mp2:
523 for f in mp2:
522 if f not in mc:
524 if f not in mc:
523 files.add(f)
525 files.add(f)
524 for f in mc:
526 for f in mc:
525 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
527 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
526 files.add(f)
528 files.add(f)
527 for fn in files:
529 for fn in files:
528 if isstandin(fn) and fn in ctx:
530 if isstandin(fn) and fn in ctx:
529 addfunc(fn, readasstandin(ctx[fn]))
531 addfunc(fn, readasstandin(ctx[fn]))
530 repo.ui.progress(_('finding outgoing largefiles'), None)
532 repo.ui.progress(_('finding outgoing largefiles'), None)
531
533
532 def updatestandinsbymatch(repo, match):
534 def updatestandinsbymatch(repo, match):
533 '''Update standins in the working directory according to specified match
535 '''Update standins in the working directory according to specified match
534
536
535 This returns (possibly modified) ``match`` object to be used for
537 This returns (possibly modified) ``match`` object to be used for
536 subsequent commit process.
538 subsequent commit process.
537 '''
539 '''
538
540
539 ui = repo.ui
541 ui = repo.ui
540
542
541 # Case 1: user calls commit with no specific files or
543 # Case 1: user calls commit with no specific files or
542 # include/exclude patterns: refresh and commit all files that
544 # include/exclude patterns: refresh and commit all files that
543 # are "dirty".
545 # are "dirty".
544 if match is None or match.always():
546 if match is None or match.always():
545 # Spend a bit of time here to get a list of files we know
547 # Spend a bit of time here to get a list of files we know
546 # are modified so we can compare only against those.
548 # are modified so we can compare only against those.
547 # It can cost a lot of time (several seconds)
549 # It can cost a lot of time (several seconds)
548 # otherwise to update all standins if the largefiles are
550 # otherwise to update all standins if the largefiles are
549 # large.
551 # large.
550 lfdirstate = openlfdirstate(ui, repo)
552 lfdirstate = openlfdirstate(ui, repo)
551 dirtymatch = matchmod.always(repo.root, repo.getcwd())
553 dirtymatch = matchmod.always(repo.root, repo.getcwd())
552 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
554 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
553 False)
555 False)
554 modifiedfiles = unsure + s.modified + s.added + s.removed
556 modifiedfiles = unsure + s.modified + s.added + s.removed
555 lfiles = listlfiles(repo)
557 lfiles = listlfiles(repo)
556 # this only loops through largefiles that exist (not
558 # this only loops through largefiles that exist (not
557 # removed/renamed)
559 # removed/renamed)
558 for lfile in lfiles:
560 for lfile in lfiles:
559 if lfile in modifiedfiles:
561 if lfile in modifiedfiles:
560 fstandin = standin(lfile)
562 fstandin = standin(lfile)
561 if repo.wvfs.exists(fstandin):
563 if repo.wvfs.exists(fstandin):
562 # this handles the case where a rebase is being
564 # this handles the case where a rebase is being
563 # performed and the working copy is not updated
565 # performed and the working copy is not updated
564 # yet.
566 # yet.
565 if repo.wvfs.exists(lfile):
567 if repo.wvfs.exists(lfile):
566 updatestandin(repo, lfile, fstandin)
568 updatestandin(repo, lfile, fstandin)
567
569
568 return match
570 return match
569
571
570 lfiles = listlfiles(repo)
572 lfiles = listlfiles(repo)
571 match._files = repo._subdirlfs(match.files(), lfiles)
573 match._files = repo._subdirlfs(match.files(), lfiles)
572
574
573 # Case 2: user calls commit with specified patterns: refresh
575 # Case 2: user calls commit with specified patterns: refresh
574 # any matching big files.
576 # any matching big files.
575 smatcher = composestandinmatcher(repo, match)
577 smatcher = composestandinmatcher(repo, match)
576 standins = repo.dirstate.walk(smatcher, [], False, False)
578 standins = repo.dirstate.walk(smatcher, subrepos=[], unknown=False,
579 ignored=False)
577
580
578 # No matching big files: get out of the way and pass control to
581 # No matching big files: get out of the way and pass control to
579 # the usual commit() method.
582 # the usual commit() method.
580 if not standins:
583 if not standins:
581 return match
584 return match
582
585
583 # Refresh all matching big files. It's possible that the
586 # Refresh all matching big files. It's possible that the
584 # commit will end up failing, in which case the big files will
587 # commit will end up failing, in which case the big files will
585 # stay refreshed. No harm done: the user modified them and
588 # stay refreshed. No harm done: the user modified them and
586 # asked to commit them, so sooner or later we're going to
589 # asked to commit them, so sooner or later we're going to
587 # refresh the standins. Might as well leave them refreshed.
590 # refresh the standins. Might as well leave them refreshed.
588 lfdirstate = openlfdirstate(ui, repo)
591 lfdirstate = openlfdirstate(ui, repo)
589 for fstandin in standins:
592 for fstandin in standins:
590 lfile = splitstandin(fstandin)
593 lfile = splitstandin(fstandin)
591 if lfdirstate[lfile] != 'r':
594 if lfdirstate[lfile] != 'r':
592 updatestandin(repo, lfile, fstandin)
595 updatestandin(repo, lfile, fstandin)
593
596
594 # Cook up a new matcher that only matches regular files or
597 # Cook up a new matcher that only matches regular files or
595 # standins corresponding to the big files requested by the
598 # standins corresponding to the big files requested by the
596 # user. Have to modify _files to prevent commit() from
599 # user. Have to modify _files to prevent commit() from
597 # complaining "not tracked" for big files.
600 # complaining "not tracked" for big files.
598 match = copy.copy(match)
601 match = copy.copy(match)
599 origmatchfn = match.matchfn
602 origmatchfn = match.matchfn
600
603
601 # Check both the list of largefiles and the list of
604 # Check both the list of largefiles and the list of
602 # standins because if a largefile was removed, it
605 # standins because if a largefile was removed, it
603 # won't be in the list of largefiles at this point
606 # won't be in the list of largefiles at this point
604 match._files += sorted(standins)
607 match._files += sorted(standins)
605
608
606 actualfiles = []
609 actualfiles = []
607 for f in match._files:
610 for f in match._files:
608 fstandin = standin(f)
611 fstandin = standin(f)
609
612
610 # For largefiles, only one of the normal and standin should be
613 # For largefiles, only one of the normal and standin should be
611 # committed (except if one of them is a remove). In the case of a
614 # committed (except if one of them is a remove). In the case of a
612 # standin removal, drop the normal file if it is unknown to dirstate.
615 # standin removal, drop the normal file if it is unknown to dirstate.
613 # Thus, skip plain largefile names but keep the standin.
616 # Thus, skip plain largefile names but keep the standin.
614 if f in lfiles or fstandin in standins:
617 if f in lfiles or fstandin in standins:
615 if repo.dirstate[fstandin] != 'r':
618 if repo.dirstate[fstandin] != 'r':
616 if repo.dirstate[f] != 'r':
619 if repo.dirstate[f] != 'r':
617 continue
620 continue
618 elif repo.dirstate[f] == '?':
621 elif repo.dirstate[f] == '?':
619 continue
622 continue
620
623
621 actualfiles.append(f)
624 actualfiles.append(f)
622 match._files = actualfiles
625 match._files = actualfiles
623
626
624 def matchfn(f):
627 def matchfn(f):
625 if origmatchfn(f):
628 if origmatchfn(f):
626 return f not in lfiles
629 return f not in lfiles
627 else:
630 else:
628 return f in standins
631 return f in standins
629
632
630 match.matchfn = matchfn
633 match.matchfn = matchfn
631
634
632 return match
635 return match
633
636
634 class automatedcommithook(object):
637 class automatedcommithook(object):
635 '''Stateful hook to update standins at the 1st commit of resuming
638 '''Stateful hook to update standins at the 1st commit of resuming
636
639
637 For efficiency, updating standins in the working directory should
640 For efficiency, updating standins in the working directory should
638 be avoided while automated committing (like rebase, transplant and
641 be avoided while automated committing (like rebase, transplant and
639 so on), because they should be updated before committing.
642 so on), because they should be updated before committing.
640
643
641 But the 1st commit of resuming automated committing (e.g. ``rebase
644 But the 1st commit of resuming automated committing (e.g. ``rebase
642 --continue``) should update them, because largefiles may be
645 --continue``) should update them, because largefiles may be
643 modified manually.
646 modified manually.
644 '''
647 '''
645 def __init__(self, resuming):
648 def __init__(self, resuming):
646 self.resuming = resuming
649 self.resuming = resuming
647
650
648 def __call__(self, repo, match):
651 def __call__(self, repo, match):
649 if self.resuming:
652 if self.resuming:
650 self.resuming = False # avoids updating at subsequent commits
653 self.resuming = False # avoids updating at subsequent commits
651 return updatestandinsbymatch(repo, match)
654 return updatestandinsbymatch(repo, match)
652 else:
655 else:
653 return match
656 return match
654
657
655 def getstatuswriter(ui, repo, forcibly=None):
658 def getstatuswriter(ui, repo, forcibly=None):
656 '''Return the function to write largefiles specific status out
659 '''Return the function to write largefiles specific status out
657
660
658 If ``forcibly`` is ``None``, this returns the last element of
661 If ``forcibly`` is ``None``, this returns the last element of
659 ``repo._lfstatuswriters`` as "default" writer function.
662 ``repo._lfstatuswriters`` as "default" writer function.
660
663
661 Otherwise, this returns the function to always write out (or
664 Otherwise, this returns the function to always write out (or
662 ignore if ``not forcibly``) status.
665 ignore if ``not forcibly``) status.
663 '''
666 '''
664 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
667 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
665 return repo._lfstatuswriters[-1]
668 return repo._lfstatuswriters[-1]
666 else:
669 else:
667 if forcibly:
670 if forcibly:
668 return ui.status # forcibly WRITE OUT
671 return ui.status # forcibly WRITE OUT
669 else:
672 else:
670 return lambda *msg, **opts: None # forcibly IGNORE
673 return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,3882 +1,3882 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import itertools
11 import itertools
12 import os
12 import os
13 import re
13 import re
14 import tempfile
14 import tempfile
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 short,
21 short,
22 )
22 )
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 changelog,
26 changelog,
27 copies,
27 copies,
28 crecord as crecordmod,
28 crecord as crecordmod,
29 dirstateguard,
29 dirstateguard,
30 encoding,
30 encoding,
31 error,
31 error,
32 formatter,
32 formatter,
33 graphmod,
33 graphmod,
34 match as matchmod,
34 match as matchmod,
35 obsolete,
35 obsolete,
36 patch,
36 patch,
37 pathutil,
37 pathutil,
38 pycompat,
38 pycompat,
39 registrar,
39 registrar,
40 revlog,
40 revlog,
41 revset,
41 revset,
42 scmutil,
42 scmutil,
43 smartset,
43 smartset,
44 templatekw,
44 templatekw,
45 templater,
45 templater,
46 util,
46 util,
47 vfs as vfsmod,
47 vfs as vfsmod,
48 )
48 )
49 stringio = util.stringio
49 stringio = util.stringio
50
50
51 # templates of common command options
51 # templates of common command options
52
52
53 dryrunopts = [
53 dryrunopts = [
54 ('n', 'dry-run', None,
54 ('n', 'dry-run', None,
55 _('do not perform actions, just print output')),
55 _('do not perform actions, just print output')),
56 ]
56 ]
57
57
58 remoteopts = [
58 remoteopts = [
59 ('e', 'ssh', '',
59 ('e', 'ssh', '',
60 _('specify ssh command to use'), _('CMD')),
60 _('specify ssh command to use'), _('CMD')),
61 ('', 'remotecmd', '',
61 ('', 'remotecmd', '',
62 _('specify hg command to run on the remote side'), _('CMD')),
62 _('specify hg command to run on the remote side'), _('CMD')),
63 ('', 'insecure', None,
63 ('', 'insecure', None,
64 _('do not verify server certificate (ignoring web.cacerts config)')),
64 _('do not verify server certificate (ignoring web.cacerts config)')),
65 ]
65 ]
66
66
67 walkopts = [
67 walkopts = [
68 ('I', 'include', [],
68 ('I', 'include', [],
69 _('include names matching the given patterns'), _('PATTERN')),
69 _('include names matching the given patterns'), _('PATTERN')),
70 ('X', 'exclude', [],
70 ('X', 'exclude', [],
71 _('exclude names matching the given patterns'), _('PATTERN')),
71 _('exclude names matching the given patterns'), _('PATTERN')),
72 ]
72 ]
73
73
74 commitopts = [
74 commitopts = [
75 ('m', 'message', '',
75 ('m', 'message', '',
76 _('use text as commit message'), _('TEXT')),
76 _('use text as commit message'), _('TEXT')),
77 ('l', 'logfile', '',
77 ('l', 'logfile', '',
78 _('read commit message from file'), _('FILE')),
78 _('read commit message from file'), _('FILE')),
79 ]
79 ]
80
80
81 commitopts2 = [
81 commitopts2 = [
82 ('d', 'date', '',
82 ('d', 'date', '',
83 _('record the specified date as commit date'), _('DATE')),
83 _('record the specified date as commit date'), _('DATE')),
84 ('u', 'user', '',
84 ('u', 'user', '',
85 _('record the specified user as committer'), _('USER')),
85 _('record the specified user as committer'), _('USER')),
86 ]
86 ]
87
87
88 # hidden for now
88 # hidden for now
89 formatteropts = [
89 formatteropts = [
90 ('T', 'template', '',
90 ('T', 'template', '',
91 _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
91 _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
92 ]
92 ]
93
93
94 templateopts = [
94 templateopts = [
95 ('', 'style', '',
95 ('', 'style', '',
96 _('display using template map file (DEPRECATED)'), _('STYLE')),
96 _('display using template map file (DEPRECATED)'), _('STYLE')),
97 ('T', 'template', '',
97 ('T', 'template', '',
98 _('display with template'), _('TEMPLATE')),
98 _('display with template'), _('TEMPLATE')),
99 ]
99 ]
100
100
101 logopts = [
101 logopts = [
102 ('p', 'patch', None, _('show patch')),
102 ('p', 'patch', None, _('show patch')),
103 ('g', 'git', None, _('use git extended diff format')),
103 ('g', 'git', None, _('use git extended diff format')),
104 ('l', 'limit', '',
104 ('l', 'limit', '',
105 _('limit number of changes displayed'), _('NUM')),
105 _('limit number of changes displayed'), _('NUM')),
106 ('M', 'no-merges', None, _('do not show merges')),
106 ('M', 'no-merges', None, _('do not show merges')),
107 ('', 'stat', None, _('output diffstat-style summary of changes')),
107 ('', 'stat', None, _('output diffstat-style summary of changes')),
108 ('G', 'graph', None, _("show the revision DAG")),
108 ('G', 'graph', None, _("show the revision DAG")),
109 ] + templateopts
109 ] + templateopts
110
110
111 diffopts = [
111 diffopts = [
112 ('a', 'text', None, _('treat all files as text')),
112 ('a', 'text', None, _('treat all files as text')),
113 ('g', 'git', None, _('use git extended diff format')),
113 ('g', 'git', None, _('use git extended diff format')),
114 ('', 'binary', None, _('generate binary diffs in git mode (default)')),
114 ('', 'binary', None, _('generate binary diffs in git mode (default)')),
115 ('', 'nodates', None, _('omit dates from diff headers'))
115 ('', 'nodates', None, _('omit dates from diff headers'))
116 ]
116 ]
117
117
118 diffwsopts = [
118 diffwsopts = [
119 ('w', 'ignore-all-space', None,
119 ('w', 'ignore-all-space', None,
120 _('ignore white space when comparing lines')),
120 _('ignore white space when comparing lines')),
121 ('b', 'ignore-space-change', None,
121 ('b', 'ignore-space-change', None,
122 _('ignore changes in the amount of white space')),
122 _('ignore changes in the amount of white space')),
123 ('B', 'ignore-blank-lines', None,
123 ('B', 'ignore-blank-lines', None,
124 _('ignore changes whose lines are all blank')),
124 _('ignore changes whose lines are all blank')),
125 ('Z', 'ignore-space-at-eol', None,
125 ('Z', 'ignore-space-at-eol', None,
126 _('ignore changes in whitespace at EOL')),
126 _('ignore changes in whitespace at EOL')),
127 ]
127 ]
128
128
129 diffopts2 = [
129 diffopts2 = [
130 ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
130 ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
131 ('p', 'show-function', None, _('show which function each change is in')),
131 ('p', 'show-function', None, _('show which function each change is in')),
132 ('', 'reverse', None, _('produce a diff that undoes the changes')),
132 ('', 'reverse', None, _('produce a diff that undoes the changes')),
133 ] + diffwsopts + [
133 ] + diffwsopts + [
134 ('U', 'unified', '',
134 ('U', 'unified', '',
135 _('number of lines of context to show'), _('NUM')),
135 _('number of lines of context to show'), _('NUM')),
136 ('', 'stat', None, _('output diffstat-style summary of changes')),
136 ('', 'stat', None, _('output diffstat-style summary of changes')),
137 ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
137 ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
138 ]
138 ]
139
139
140 mergetoolopts = [
140 mergetoolopts = [
141 ('t', 'tool', '', _('specify merge tool')),
141 ('t', 'tool', '', _('specify merge tool')),
142 ]
142 ]
143
143
144 similarityopts = [
144 similarityopts = [
145 ('s', 'similarity', '',
145 ('s', 'similarity', '',
146 _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
146 _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
147 ]
147 ]
148
148
149 subrepoopts = [
149 subrepoopts = [
150 ('S', 'subrepos', None,
150 ('S', 'subrepos', None,
151 _('recurse into subrepositories'))
151 _('recurse into subrepositories'))
152 ]
152 ]
153
153
154 debugrevlogopts = [
154 debugrevlogopts = [
155 ('c', 'changelog', False, _('open changelog')),
155 ('c', 'changelog', False, _('open changelog')),
156 ('m', 'manifest', False, _('open manifest')),
156 ('m', 'manifest', False, _('open manifest')),
157 ('', 'dir', '', _('open directory manifest')),
157 ('', 'dir', '', _('open directory manifest')),
158 ]
158 ]
159
159
160 # special string such that everything below this line will be ingored in the
160 # special string such that everything below this line will be ingored in the
161 # editor text
161 # editor text
162 _linebelow = "^HG: ------------------------ >8 ------------------------$"
162 _linebelow = "^HG: ------------------------ >8 ------------------------$"
163
163
164 def ishunk(x):
164 def ishunk(x):
165 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
165 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
166 return isinstance(x, hunkclasses)
166 return isinstance(x, hunkclasses)
167
167
168 def newandmodified(chunks, originalchunks):
168 def newandmodified(chunks, originalchunks):
169 newlyaddedandmodifiedfiles = set()
169 newlyaddedandmodifiedfiles = set()
170 for chunk in chunks:
170 for chunk in chunks:
171 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
171 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
172 originalchunks:
172 originalchunks:
173 newlyaddedandmodifiedfiles.add(chunk.header.filename())
173 newlyaddedandmodifiedfiles.add(chunk.header.filename())
174 return newlyaddedandmodifiedfiles
174 return newlyaddedandmodifiedfiles
175
175
176 def parsealiases(cmd):
176 def parsealiases(cmd):
177 return cmd.lstrip("^").split("|")
177 return cmd.lstrip("^").split("|")
178
178
179 def setupwrapcolorwrite(ui):
179 def setupwrapcolorwrite(ui):
180 # wrap ui.write so diff output can be labeled/colorized
180 # wrap ui.write so diff output can be labeled/colorized
181 def wrapwrite(orig, *args, **kw):
181 def wrapwrite(orig, *args, **kw):
182 label = kw.pop('label', '')
182 label = kw.pop('label', '')
183 for chunk, l in patch.difflabel(lambda: args):
183 for chunk, l in patch.difflabel(lambda: args):
184 orig(chunk, label=label + l)
184 orig(chunk, label=label + l)
185
185
186 oldwrite = ui.write
186 oldwrite = ui.write
187 def wrap(*args, **kwargs):
187 def wrap(*args, **kwargs):
188 return wrapwrite(oldwrite, *args, **kwargs)
188 return wrapwrite(oldwrite, *args, **kwargs)
189 setattr(ui, 'write', wrap)
189 setattr(ui, 'write', wrap)
190 return oldwrite
190 return oldwrite
191
191
192 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
192 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
193 if usecurses:
193 if usecurses:
194 if testfile:
194 if testfile:
195 recordfn = crecordmod.testdecorator(testfile,
195 recordfn = crecordmod.testdecorator(testfile,
196 crecordmod.testchunkselector)
196 crecordmod.testchunkselector)
197 else:
197 else:
198 recordfn = crecordmod.chunkselector
198 recordfn = crecordmod.chunkselector
199
199
200 return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
200 return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
201
201
202 else:
202 else:
203 return patch.filterpatch(ui, originalhunks, operation)
203 return patch.filterpatch(ui, originalhunks, operation)
204
204
205 def recordfilter(ui, originalhunks, operation=None):
205 def recordfilter(ui, originalhunks, operation=None):
206 """ Prompts the user to filter the originalhunks and return a list of
206 """ Prompts the user to filter the originalhunks and return a list of
207 selected hunks.
207 selected hunks.
208 *operation* is used for to build ui messages to indicate the user what
208 *operation* is used for to build ui messages to indicate the user what
209 kind of filtering they are doing: reverting, committing, shelving, etc.
209 kind of filtering they are doing: reverting, committing, shelving, etc.
210 (see patch.filterpatch).
210 (see patch.filterpatch).
211 """
211 """
212 usecurses = crecordmod.checkcurses(ui)
212 usecurses = crecordmod.checkcurses(ui)
213 testfile = ui.config('experimental', 'crecordtest')
213 testfile = ui.config('experimental', 'crecordtest')
214 oldwrite = setupwrapcolorwrite(ui)
214 oldwrite = setupwrapcolorwrite(ui)
215 try:
215 try:
216 newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
216 newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
217 testfile, operation)
217 testfile, operation)
218 finally:
218 finally:
219 ui.write = oldwrite
219 ui.write = oldwrite
220 return newchunks, newopts
220 return newchunks, newopts
221
221
222 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
222 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
223 filterfn, *pats, **opts):
223 filterfn, *pats, **opts):
224 from . import merge as mergemod
224 from . import merge as mergemod
225 opts = pycompat.byteskwargs(opts)
225 opts = pycompat.byteskwargs(opts)
226 if not ui.interactive():
226 if not ui.interactive():
227 if cmdsuggest:
227 if cmdsuggest:
228 msg = _('running non-interactively, use %s instead') % cmdsuggest
228 msg = _('running non-interactively, use %s instead') % cmdsuggest
229 else:
229 else:
230 msg = _('running non-interactively')
230 msg = _('running non-interactively')
231 raise error.Abort(msg)
231 raise error.Abort(msg)
232
232
233 # make sure username is set before going interactive
233 # make sure username is set before going interactive
234 if not opts.get('user'):
234 if not opts.get('user'):
235 ui.username() # raise exception, username not provided
235 ui.username() # raise exception, username not provided
236
236
237 def recordfunc(ui, repo, message, match, opts):
237 def recordfunc(ui, repo, message, match, opts):
238 """This is generic record driver.
238 """This is generic record driver.
239
239
240 Its job is to interactively filter local changes, and
240 Its job is to interactively filter local changes, and
241 accordingly prepare working directory into a state in which the
241 accordingly prepare working directory into a state in which the
242 job can be delegated to a non-interactive commit command such as
242 job can be delegated to a non-interactive commit command such as
243 'commit' or 'qrefresh'.
243 'commit' or 'qrefresh'.
244
244
245 After the actual job is done by non-interactive command, the
245 After the actual job is done by non-interactive command, the
246 working directory is restored to its original state.
246 working directory is restored to its original state.
247
247
248 In the end we'll record interesting changes, and everything else
248 In the end we'll record interesting changes, and everything else
249 will be left in place, so the user can continue working.
249 will be left in place, so the user can continue working.
250 """
250 """
251
251
252 checkunfinished(repo, commit=True)
252 checkunfinished(repo, commit=True)
253 wctx = repo[None]
253 wctx = repo[None]
254 merge = len(wctx.parents()) > 1
254 merge = len(wctx.parents()) > 1
255 if merge:
255 if merge:
256 raise error.Abort(_('cannot partially commit a merge '
256 raise error.Abort(_('cannot partially commit a merge '
257 '(use "hg commit" instead)'))
257 '(use "hg commit" instead)'))
258
258
259 def fail(f, msg):
259 def fail(f, msg):
260 raise error.Abort('%s: %s' % (f, msg))
260 raise error.Abort('%s: %s' % (f, msg))
261
261
262 force = opts.get('force')
262 force = opts.get('force')
263 if not force:
263 if not force:
264 vdirs = []
264 vdirs = []
265 match.explicitdir = vdirs.append
265 match.explicitdir = vdirs.append
266 match.bad = fail
266 match.bad = fail
267
267
268 status = repo.status(match=match)
268 status = repo.status(match=match)
269 if not force:
269 if not force:
270 repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
270 repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
271 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
271 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
272 diffopts.nodates = True
272 diffopts.nodates = True
273 diffopts.git = True
273 diffopts.git = True
274 diffopts.showfunc = True
274 diffopts.showfunc = True
275 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
275 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
276 originalchunks = patch.parsepatch(originaldiff)
276 originalchunks = patch.parsepatch(originaldiff)
277
277
278 # 1. filter patch, since we are intending to apply subset of it
278 # 1. filter patch, since we are intending to apply subset of it
279 try:
279 try:
280 chunks, newopts = filterfn(ui, originalchunks)
280 chunks, newopts = filterfn(ui, originalchunks)
281 except error.PatchError as err:
281 except error.PatchError as err:
282 raise error.Abort(_('error parsing patch: %s') % err)
282 raise error.Abort(_('error parsing patch: %s') % err)
283 opts.update(newopts)
283 opts.update(newopts)
284
284
285 # We need to keep a backup of files that have been newly added and
285 # We need to keep a backup of files that have been newly added and
286 # modified during the recording process because there is a previous
286 # modified during the recording process because there is a previous
287 # version without the edit in the workdir
287 # version without the edit in the workdir
288 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
288 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
289 contenders = set()
289 contenders = set()
290 for h in chunks:
290 for h in chunks:
291 try:
291 try:
292 contenders.update(set(h.files()))
292 contenders.update(set(h.files()))
293 except AttributeError:
293 except AttributeError:
294 pass
294 pass
295
295
296 changed = status.modified + status.added + status.removed
296 changed = status.modified + status.added + status.removed
297 newfiles = [f for f in changed if f in contenders]
297 newfiles = [f for f in changed if f in contenders]
298 if not newfiles:
298 if not newfiles:
299 ui.status(_('no changes to record\n'))
299 ui.status(_('no changes to record\n'))
300 return 0
300 return 0
301
301
302 modified = set(status.modified)
302 modified = set(status.modified)
303
303
304 # 2. backup changed files, so we can restore them in the end
304 # 2. backup changed files, so we can restore them in the end
305
305
306 if backupall:
306 if backupall:
307 tobackup = changed
307 tobackup = changed
308 else:
308 else:
309 tobackup = [f for f in newfiles if f in modified or f in \
309 tobackup = [f for f in newfiles if f in modified or f in \
310 newlyaddedandmodifiedfiles]
310 newlyaddedandmodifiedfiles]
311 backups = {}
311 backups = {}
312 if tobackup:
312 if tobackup:
313 backupdir = repo.vfs.join('record-backups')
313 backupdir = repo.vfs.join('record-backups')
314 try:
314 try:
315 os.mkdir(backupdir)
315 os.mkdir(backupdir)
316 except OSError as err:
316 except OSError as err:
317 if err.errno != errno.EEXIST:
317 if err.errno != errno.EEXIST:
318 raise
318 raise
319 try:
319 try:
320 # backup continues
320 # backup continues
321 for f in tobackup:
321 for f in tobackup:
322 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
322 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
323 dir=backupdir)
323 dir=backupdir)
324 os.close(fd)
324 os.close(fd)
325 ui.debug('backup %r as %r\n' % (f, tmpname))
325 ui.debug('backup %r as %r\n' % (f, tmpname))
326 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
326 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
327 backups[f] = tmpname
327 backups[f] = tmpname
328
328
329 fp = stringio()
329 fp = stringio()
330 for c in chunks:
330 for c in chunks:
331 fname = c.filename()
331 fname = c.filename()
332 if fname in backups:
332 if fname in backups:
333 c.write(fp)
333 c.write(fp)
334 dopatch = fp.tell()
334 dopatch = fp.tell()
335 fp.seek(0)
335 fp.seek(0)
336
336
337 # 2.5 optionally review / modify patch in text editor
337 # 2.5 optionally review / modify patch in text editor
338 if opts.get('review', False):
338 if opts.get('review', False):
339 patchtext = (crecordmod.diffhelptext
339 patchtext = (crecordmod.diffhelptext
340 + crecordmod.patchhelptext
340 + crecordmod.patchhelptext
341 + fp.read())
341 + fp.read())
342 reviewedpatch = ui.edit(patchtext, "",
342 reviewedpatch = ui.edit(patchtext, "",
343 action="diff",
343 action="diff",
344 repopath=repo.path)
344 repopath=repo.path)
345 fp.truncate(0)
345 fp.truncate(0)
346 fp.write(reviewedpatch)
346 fp.write(reviewedpatch)
347 fp.seek(0)
347 fp.seek(0)
348
348
349 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
349 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
350 # 3a. apply filtered patch to clean repo (clean)
350 # 3a. apply filtered patch to clean repo (clean)
351 if backups:
351 if backups:
352 # Equivalent to hg.revert
352 # Equivalent to hg.revert
353 m = scmutil.matchfiles(repo, backups.keys())
353 m = scmutil.matchfiles(repo, backups.keys())
354 mergemod.update(repo, repo.dirstate.p1(),
354 mergemod.update(repo, repo.dirstate.p1(),
355 False, True, matcher=m)
355 False, True, matcher=m)
356
356
357 # 3b. (apply)
357 # 3b. (apply)
358 if dopatch:
358 if dopatch:
359 try:
359 try:
360 ui.debug('applying patch\n')
360 ui.debug('applying patch\n')
361 ui.debug(fp.getvalue())
361 ui.debug(fp.getvalue())
362 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
362 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
363 except error.PatchError as err:
363 except error.PatchError as err:
364 raise error.Abort(str(err))
364 raise error.Abort(str(err))
365 del fp
365 del fp
366
366
367 # 4. We prepared working directory according to filtered
367 # 4. We prepared working directory according to filtered
368 # patch. Now is the time to delegate the job to
368 # patch. Now is the time to delegate the job to
369 # commit/qrefresh or the like!
369 # commit/qrefresh or the like!
370
370
371 # Make all of the pathnames absolute.
371 # Make all of the pathnames absolute.
372 newfiles = [repo.wjoin(nf) for nf in newfiles]
372 newfiles = [repo.wjoin(nf) for nf in newfiles]
373 return commitfunc(ui, repo, *newfiles, **opts)
373 return commitfunc(ui, repo, *newfiles, **opts)
374 finally:
374 finally:
375 # 5. finally restore backed-up files
375 # 5. finally restore backed-up files
376 try:
376 try:
377 dirstate = repo.dirstate
377 dirstate = repo.dirstate
378 for realname, tmpname in backups.iteritems():
378 for realname, tmpname in backups.iteritems():
379 ui.debug('restoring %r to %r\n' % (tmpname, realname))
379 ui.debug('restoring %r to %r\n' % (tmpname, realname))
380
380
381 if dirstate[realname] == 'n':
381 if dirstate[realname] == 'n':
382 # without normallookup, restoring timestamp
382 # without normallookup, restoring timestamp
383 # may cause partially committed files
383 # may cause partially committed files
384 # to be treated as unmodified
384 # to be treated as unmodified
385 dirstate.normallookup(realname)
385 dirstate.normallookup(realname)
386
386
387 # copystat=True here and above are a hack to trick any
387 # copystat=True here and above are a hack to trick any
388 # editors that have f open that we haven't modified them.
388 # editors that have f open that we haven't modified them.
389 #
389 #
390 # Also note that this racy as an editor could notice the
390 # Also note that this racy as an editor could notice the
391 # file's mtime before we've finished writing it.
391 # file's mtime before we've finished writing it.
392 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
392 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
393 os.unlink(tmpname)
393 os.unlink(tmpname)
394 if tobackup:
394 if tobackup:
395 os.rmdir(backupdir)
395 os.rmdir(backupdir)
396 except OSError:
396 except OSError:
397 pass
397 pass
398
398
399 def recordinwlock(ui, repo, message, match, opts):
399 def recordinwlock(ui, repo, message, match, opts):
400 with repo.wlock():
400 with repo.wlock():
401 return recordfunc(ui, repo, message, match, opts)
401 return recordfunc(ui, repo, message, match, opts)
402
402
403 return commit(ui, repo, recordinwlock, pats, opts)
403 return commit(ui, repo, recordinwlock, pats, opts)
404
404
405 def tersestatus(root, statlist, status, ignorefn, ignore):
405 def tersestatus(root, statlist, status, ignorefn, ignore):
406 """
406 """
407 Returns a list of statuses with directory collapsed if all the files in the
407 Returns a list of statuses with directory collapsed if all the files in the
408 directory has the same status.
408 directory has the same status.
409 """
409 """
410
410
411 def numfiles(dirname):
411 def numfiles(dirname):
412 """
412 """
413 Calculates the number of tracked files in a given directory which also
413 Calculates the number of tracked files in a given directory which also
414 includes files which were removed or deleted. Considers ignored files
414 includes files which were removed or deleted. Considers ignored files
415 if ignore argument is True or 'i' is present in status argument.
415 if ignore argument is True or 'i' is present in status argument.
416 """
416 """
417 if lencache.get(dirname):
417 if lencache.get(dirname):
418 return lencache[dirname]
418 return lencache[dirname]
419 if 'i' in status or ignore:
419 if 'i' in status or ignore:
420 def match(localpath):
420 def match(localpath):
421 absolutepath = os.path.join(root, localpath)
421 absolutepath = os.path.join(root, localpath)
422 if os.path.isdir(absolutepath) and isemptydir(absolutepath):
422 if os.path.isdir(absolutepath) and isemptydir(absolutepath):
423 return True
423 return True
424 return False
424 return False
425 else:
425 else:
426 def match(localpath):
426 def match(localpath):
427 # there can be directory whose all the files are ignored and
427 # there can be directory whose all the files are ignored and
428 # hence the drectory should also be ignored while counting
428 # hence the drectory should also be ignored while counting
429 # number of files or subdirs in it's parent directory. This
429 # number of files or subdirs in it's parent directory. This
430 # checks the same.
430 # checks the same.
431 # XXX: We need a better logic here.
431 # XXX: We need a better logic here.
432 if os.path.isdir(os.path.join(root, localpath)):
432 if os.path.isdir(os.path.join(root, localpath)):
433 return isignoreddir(localpath)
433 return isignoreddir(localpath)
434 else:
434 else:
435 # XXX: there can be files which have the ignored pattern but
435 # XXX: there can be files which have the ignored pattern but
436 # are not ignored. That leads to bug in counting number of
436 # are not ignored. That leads to bug in counting number of
437 # tracked files in the directory.
437 # tracked files in the directory.
438 return ignorefn(localpath)
438 return ignorefn(localpath)
439 lendir = 0
439 lendir = 0
440 abspath = os.path.join(root, dirname)
440 abspath = os.path.join(root, dirname)
441 # There might be cases when a directory does not exists as the whole
441 # There might be cases when a directory does not exists as the whole
442 # directory can be removed and/or deleted.
442 # directory can be removed and/or deleted.
443 try:
443 try:
444 for f in os.listdir(abspath):
444 for f in os.listdir(abspath):
445 localpath = os.path.join(dirname, f)
445 localpath = os.path.join(dirname, f)
446 if not match(localpath):
446 if not match(localpath):
447 lendir += 1
447 lendir += 1
448 except OSError:
448 except OSError:
449 pass
449 pass
450 lendir += len(absentdir.get(dirname, []))
450 lendir += len(absentdir.get(dirname, []))
451 lencache[dirname] = lendir
451 lencache[dirname] = lendir
452 return lendir
452 return lendir
453
453
454 def isemptydir(abspath):
454 def isemptydir(abspath):
455 """
455 """
456 Check whether a directory is empty or not, i.e. there is no files in the
456 Check whether a directory is empty or not, i.e. there is no files in the
457 directory and all its subdirectories.
457 directory and all its subdirectories.
458 """
458 """
459 for f in os.listdir(abspath):
459 for f in os.listdir(abspath):
460 fullpath = os.path.join(abspath, f)
460 fullpath = os.path.join(abspath, f)
461 if os.path.isdir(fullpath):
461 if os.path.isdir(fullpath):
462 # recursion here
462 # recursion here
463 ret = isemptydir(fullpath)
463 ret = isemptydir(fullpath)
464 if not ret:
464 if not ret:
465 return False
465 return False
466 else:
466 else:
467 return False
467 return False
468 return True
468 return True
469
469
470 def isignoreddir(localpath):
470 def isignoreddir(localpath):
471 """Return True if `localpath` directory is ignored or contains only
471 """Return True if `localpath` directory is ignored or contains only
472 ignored files and should hence be considered ignored.
472 ignored files and should hence be considered ignored.
473 """
473 """
474 dirpath = os.path.join(root, localpath)
474 dirpath = os.path.join(root, localpath)
475 if ignorefn(dirpath):
475 if ignorefn(dirpath):
476 return True
476 return True
477 for f in os.listdir(dirpath):
477 for f in os.listdir(dirpath):
478 filepath = os.path.join(dirpath, f)
478 filepath = os.path.join(dirpath, f)
479 if os.path.isdir(filepath):
479 if os.path.isdir(filepath):
480 # recursion here
480 # recursion here
481 ret = isignoreddir(os.path.join(localpath, f))
481 ret = isignoreddir(os.path.join(localpath, f))
482 if not ret:
482 if not ret:
483 return False
483 return False
484 else:
484 else:
485 if not ignorefn(os.path.join(localpath, f)):
485 if not ignorefn(os.path.join(localpath, f)):
486 return False
486 return False
487 return True
487 return True
488
488
489 def absentones(removedfiles, missingfiles):
489 def absentones(removedfiles, missingfiles):
490 """
490 """
491 Returns a dictionary of directories with files in it which are either
491 Returns a dictionary of directories with files in it which are either
492 removed or missing (deleted) in them.
492 removed or missing (deleted) in them.
493 """
493 """
494 absentdir = {}
494 absentdir = {}
495 absentfiles = removedfiles + missingfiles
495 absentfiles = removedfiles + missingfiles
496 while absentfiles:
496 while absentfiles:
497 f = absentfiles.pop()
497 f = absentfiles.pop()
498 par = os.path.dirname(f)
498 par = os.path.dirname(f)
499 if par == '':
499 if par == '':
500 continue
500 continue
501 # we need to store files rather than number of files as some files
501 # we need to store files rather than number of files as some files
502 # or subdirectories in a directory can be counted twice. This is
502 # or subdirectories in a directory can be counted twice. This is
503 # also we have used sets here.
503 # also we have used sets here.
504 try:
504 try:
505 absentdir[par].add(f)
505 absentdir[par].add(f)
506 except KeyError:
506 except KeyError:
507 absentdir[par] = set([f])
507 absentdir[par] = set([f])
508 absentfiles.append(par)
508 absentfiles.append(par)
509 return absentdir
509 return absentdir
510
510
511 indexes = {'m': 0, 'a': 1, 'r': 2, 'd': 3, 'u': 4, 'i': 5, 'c': 6}
511 indexes = {'m': 0, 'a': 1, 'r': 2, 'd': 3, 'u': 4, 'i': 5, 'c': 6}
512 # get a dictonary of directories and files which are missing as os.listdir()
512 # get a dictonary of directories and files which are missing as os.listdir()
513 # won't be able to list them.
513 # won't be able to list them.
514 absentdir = absentones(statlist[2], statlist[3])
514 absentdir = absentones(statlist[2], statlist[3])
515 finalrs = [[]] * len(indexes)
515 finalrs = [[]] * len(indexes)
516 didsomethingchanged = False
516 didsomethingchanged = False
517 # dictionary to store number of files and subdir in a directory so that we
517 # dictionary to store number of files and subdir in a directory so that we
518 # don't compute that again.
518 # don't compute that again.
519 lencache = {}
519 lencache = {}
520
520
521 for st in pycompat.bytestr(status):
521 for st in pycompat.bytestr(status):
522
522
523 try:
523 try:
524 ind = indexes[st]
524 ind = indexes[st]
525 except KeyError:
525 except KeyError:
526 # TODO: Need a better error message here
526 # TODO: Need a better error message here
527 raise error.Abort("'%s' not recognized" % st)
527 raise error.Abort("'%s' not recognized" % st)
528
528
529 sfiles = statlist[ind]
529 sfiles = statlist[ind]
530 if not sfiles:
530 if not sfiles:
531 continue
531 continue
532 pardict = {}
532 pardict = {}
533 for a in sfiles:
533 for a in sfiles:
534 par = os.path.dirname(a)
534 par = os.path.dirname(a)
535 pardict.setdefault(par, []).append(a)
535 pardict.setdefault(par, []).append(a)
536
536
537 rs = []
537 rs = []
538 newls = []
538 newls = []
539 for par, files in sorted(pardict.iteritems()):
539 for par, files in sorted(pardict.iteritems()):
540 lenpar = numfiles(par)
540 lenpar = numfiles(par)
541 if lenpar == len(files):
541 if lenpar == len(files):
542 newls.append(par)
542 newls.append(par)
543
543
544 if not newls:
544 if not newls:
545 continue
545 continue
546
546
547 while newls:
547 while newls:
548 newel = newls.pop()
548 newel = newls.pop()
549 if newel == '':
549 if newel == '':
550 continue
550 continue
551 parn = os.path.dirname(newel)
551 parn = os.path.dirname(newel)
552 pardict[newel] = []
552 pardict[newel] = []
553 # Adding pycompat.ossep as newel is a directory.
553 # Adding pycompat.ossep as newel is a directory.
554 pardict.setdefault(parn, []).append(newel + pycompat.ossep)
554 pardict.setdefault(parn, []).append(newel + pycompat.ossep)
555 lenpar = numfiles(parn)
555 lenpar = numfiles(parn)
556 if lenpar == len(pardict[parn]):
556 if lenpar == len(pardict[parn]):
557 newls.append(parn)
557 newls.append(parn)
558
558
559 # dict.values() for Py3 compatibility
559 # dict.values() for Py3 compatibility
560 for files in pardict.values():
560 for files in pardict.values():
561 rs.extend(files)
561 rs.extend(files)
562
562
563 rs.sort()
563 rs.sort()
564 finalrs[ind] = rs
564 finalrs[ind] = rs
565 didsomethingchanged = True
565 didsomethingchanged = True
566
566
567 # If nothing is changed, make sure the order of files is preserved.
567 # If nothing is changed, make sure the order of files is preserved.
568 if not didsomethingchanged:
568 if not didsomethingchanged:
569 return statlist
569 return statlist
570
570
571 for x in xrange(len(indexes)):
571 for x in xrange(len(indexes)):
572 if not finalrs[x]:
572 if not finalrs[x]:
573 finalrs[x] = statlist[x]
573 finalrs[x] = statlist[x]
574
574
575 return finalrs
575 return finalrs
576
576
577 def _commentlines(raw):
577 def _commentlines(raw):
578 '''Surround lineswith a comment char and a new line'''
578 '''Surround lineswith a comment char and a new line'''
579 lines = raw.splitlines()
579 lines = raw.splitlines()
580 commentedlines = ['# %s' % line for line in lines]
580 commentedlines = ['# %s' % line for line in lines]
581 return '\n'.join(commentedlines) + '\n'
581 return '\n'.join(commentedlines) + '\n'
582
582
583 def _conflictsmsg(repo):
583 def _conflictsmsg(repo):
584 # avoid merge cycle
584 # avoid merge cycle
585 from . import merge as mergemod
585 from . import merge as mergemod
586 mergestate = mergemod.mergestate.read(repo)
586 mergestate = mergemod.mergestate.read(repo)
587 if not mergestate.active():
587 if not mergestate.active():
588 return
588 return
589
589
590 m = scmutil.match(repo[None])
590 m = scmutil.match(repo[None])
591 unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
591 unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
592 if unresolvedlist:
592 if unresolvedlist:
593 mergeliststr = '\n'.join(
593 mergeliststr = '\n'.join(
594 [' %s' % os.path.relpath(
594 [' %s' % os.path.relpath(
595 os.path.join(repo.root, path),
595 os.path.join(repo.root, path),
596 pycompat.getcwd()) for path in unresolvedlist])
596 pycompat.getcwd()) for path in unresolvedlist])
597 msg = _('''Unresolved merge conflicts:
597 msg = _('''Unresolved merge conflicts:
598
598
599 %s
599 %s
600
600
601 To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
601 To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
602 else:
602 else:
603 msg = _('No unresolved merge conflicts.')
603 msg = _('No unresolved merge conflicts.')
604
604
605 return _commentlines(msg)
605 return _commentlines(msg)
606
606
607 def _helpmessage(continuecmd, abortcmd):
607 def _helpmessage(continuecmd, abortcmd):
608 msg = _('To continue: %s\n'
608 msg = _('To continue: %s\n'
609 'To abort: %s') % (continuecmd, abortcmd)
609 'To abort: %s') % (continuecmd, abortcmd)
610 return _commentlines(msg)
610 return _commentlines(msg)
611
611
612 def _rebasemsg():
612 def _rebasemsg():
613 return _helpmessage('hg rebase --continue', 'hg rebase --abort')
613 return _helpmessage('hg rebase --continue', 'hg rebase --abort')
614
614
615 def _histeditmsg():
615 def _histeditmsg():
616 return _helpmessage('hg histedit --continue', 'hg histedit --abort')
616 return _helpmessage('hg histedit --continue', 'hg histedit --abort')
617
617
618 def _unshelvemsg():
618 def _unshelvemsg():
619 return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')
619 return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')
620
620
621 def _updatecleanmsg(dest=None):
621 def _updatecleanmsg(dest=None):
622 warning = _('warning: this will discard uncommitted changes')
622 warning = _('warning: this will discard uncommitted changes')
623 return 'hg update --clean %s (%s)' % (dest or '.', warning)
623 return 'hg update --clean %s (%s)' % (dest or '.', warning)
624
624
625 def _graftmsg():
625 def _graftmsg():
626 # tweakdefaults requires `update` to have a rev hence the `.`
626 # tweakdefaults requires `update` to have a rev hence the `.`
627 return _helpmessage('hg graft --continue', _updatecleanmsg())
627 return _helpmessage('hg graft --continue', _updatecleanmsg())
628
628
629 def _mergemsg():
629 def _mergemsg():
630 # tweakdefaults requires `update` to have a rev hence the `.`
630 # tweakdefaults requires `update` to have a rev hence the `.`
631 return _helpmessage('hg commit', _updatecleanmsg())
631 return _helpmessage('hg commit', _updatecleanmsg())
632
632
633 def _bisectmsg():
633 def _bisectmsg():
634 msg = _('To mark the changeset good: hg bisect --good\n'
634 msg = _('To mark the changeset good: hg bisect --good\n'
635 'To mark the changeset bad: hg bisect --bad\n'
635 'To mark the changeset bad: hg bisect --bad\n'
636 'To abort: hg bisect --reset\n')
636 'To abort: hg bisect --reset\n')
637 return _commentlines(msg)
637 return _commentlines(msg)
638
638
639 def fileexistspredicate(filename):
639 def fileexistspredicate(filename):
640 return lambda repo: repo.vfs.exists(filename)
640 return lambda repo: repo.vfs.exists(filename)
641
641
642 def _mergepredicate(repo):
642 def _mergepredicate(repo):
643 return len(repo[None].parents()) > 1
643 return len(repo[None].parents()) > 1
644
644
645 STATES = (
645 STATES = (
646 # (state, predicate to detect states, helpful message function)
646 # (state, predicate to detect states, helpful message function)
647 ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
647 ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
648 ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
648 ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
649 ('graft', fileexistspredicate('graftstate'), _graftmsg),
649 ('graft', fileexistspredicate('graftstate'), _graftmsg),
650 ('unshelve', fileexistspredicate('unshelverebasestate'), _unshelvemsg),
650 ('unshelve', fileexistspredicate('unshelverebasestate'), _unshelvemsg),
651 ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
651 ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
652 # The merge state is part of a list that will be iterated over.
652 # The merge state is part of a list that will be iterated over.
653 # They need to be last because some of the other unfinished states may also
653 # They need to be last because some of the other unfinished states may also
654 # be in a merge or update state (eg. rebase, histedit, graft, etc).
654 # be in a merge or update state (eg. rebase, histedit, graft, etc).
655 # We want those to have priority.
655 # We want those to have priority.
656 ('merge', _mergepredicate, _mergemsg),
656 ('merge', _mergepredicate, _mergemsg),
657 )
657 )
658
658
659 def _getrepostate(repo):
659 def _getrepostate(repo):
660 # experimental config: commands.status.skipstates
660 # experimental config: commands.status.skipstates
661 skip = set(repo.ui.configlist('commands', 'status.skipstates'))
661 skip = set(repo.ui.configlist('commands', 'status.skipstates'))
662 for state, statedetectionpredicate, msgfn in STATES:
662 for state, statedetectionpredicate, msgfn in STATES:
663 if state in skip:
663 if state in skip:
664 continue
664 continue
665 if statedetectionpredicate(repo):
665 if statedetectionpredicate(repo):
666 return (state, statedetectionpredicate, msgfn)
666 return (state, statedetectionpredicate, msgfn)
667
667
668 def morestatus(repo, fm):
668 def morestatus(repo, fm):
669 statetuple = _getrepostate(repo)
669 statetuple = _getrepostate(repo)
670 label = 'status.morestatus'
670 label = 'status.morestatus'
671 if statetuple:
671 if statetuple:
672 fm.startitem()
672 fm.startitem()
673 state, statedetectionpredicate, helpfulmsg = statetuple
673 state, statedetectionpredicate, helpfulmsg = statetuple
674 statemsg = _('The repository is in an unfinished *%s* state.') % state
674 statemsg = _('The repository is in an unfinished *%s* state.') % state
675 fm.write('statemsg', '%s\n', _commentlines(statemsg), label=label)
675 fm.write('statemsg', '%s\n', _commentlines(statemsg), label=label)
676 conmsg = _conflictsmsg(repo)
676 conmsg = _conflictsmsg(repo)
677 if conmsg:
677 if conmsg:
678 fm.write('conflictsmsg', '%s\n', conmsg, label=label)
678 fm.write('conflictsmsg', '%s\n', conmsg, label=label)
679 if helpfulmsg:
679 if helpfulmsg:
680 helpmsg = helpfulmsg()
680 helpmsg = helpfulmsg()
681 fm.write('helpmsg', '%s\n', helpmsg, label=label)
681 fm.write('helpmsg', '%s\n', helpmsg, label=label)
682
682
683 def findpossible(cmd, table, strict=False):
683 def findpossible(cmd, table, strict=False):
684 """
684 """
685 Return cmd -> (aliases, command table entry)
685 Return cmd -> (aliases, command table entry)
686 for each matching command.
686 for each matching command.
687 Return debug commands (or their aliases) only if no normal command matches.
687 Return debug commands (or their aliases) only if no normal command matches.
688 """
688 """
689 choice = {}
689 choice = {}
690 debugchoice = {}
690 debugchoice = {}
691
691
692 if cmd in table:
692 if cmd in table:
693 # short-circuit exact matches, "log" alias beats "^log|history"
693 # short-circuit exact matches, "log" alias beats "^log|history"
694 keys = [cmd]
694 keys = [cmd]
695 else:
695 else:
696 keys = table.keys()
696 keys = table.keys()
697
697
698 allcmds = []
698 allcmds = []
699 for e in keys:
699 for e in keys:
700 aliases = parsealiases(e)
700 aliases = parsealiases(e)
701 allcmds.extend(aliases)
701 allcmds.extend(aliases)
702 found = None
702 found = None
703 if cmd in aliases:
703 if cmd in aliases:
704 found = cmd
704 found = cmd
705 elif not strict:
705 elif not strict:
706 for a in aliases:
706 for a in aliases:
707 if a.startswith(cmd):
707 if a.startswith(cmd):
708 found = a
708 found = a
709 break
709 break
710 if found is not None:
710 if found is not None:
711 if aliases[0].startswith("debug") or found.startswith("debug"):
711 if aliases[0].startswith("debug") or found.startswith("debug"):
712 debugchoice[found] = (aliases, table[e])
712 debugchoice[found] = (aliases, table[e])
713 else:
713 else:
714 choice[found] = (aliases, table[e])
714 choice[found] = (aliases, table[e])
715
715
716 if not choice and debugchoice:
716 if not choice and debugchoice:
717 choice = debugchoice
717 choice = debugchoice
718
718
719 return choice, allcmds
719 return choice, allcmds
720
720
721 def findcmd(cmd, table, strict=True):
721 def findcmd(cmd, table, strict=True):
722 """Return (aliases, command table entry) for command string."""
722 """Return (aliases, command table entry) for command string."""
723 choice, allcmds = findpossible(cmd, table, strict)
723 choice, allcmds = findpossible(cmd, table, strict)
724
724
725 if cmd in choice:
725 if cmd in choice:
726 return choice[cmd]
726 return choice[cmd]
727
727
728 if len(choice) > 1:
728 if len(choice) > 1:
729 clist = sorted(choice)
729 clist = sorted(choice)
730 raise error.AmbiguousCommand(cmd, clist)
730 raise error.AmbiguousCommand(cmd, clist)
731
731
732 if choice:
732 if choice:
733 return list(choice.values())[0]
733 return list(choice.values())[0]
734
734
735 raise error.UnknownCommand(cmd, allcmds)
735 raise error.UnknownCommand(cmd, allcmds)
736
736
737 def findrepo(p):
737 def findrepo(p):
738 while not os.path.isdir(os.path.join(p, ".hg")):
738 while not os.path.isdir(os.path.join(p, ".hg")):
739 oldp, p = p, os.path.dirname(p)
739 oldp, p = p, os.path.dirname(p)
740 if p == oldp:
740 if p == oldp:
741 return None
741 return None
742
742
743 return p
743 return p
744
744
745 def bailifchanged(repo, merge=True, hint=None):
745 def bailifchanged(repo, merge=True, hint=None):
746 """ enforce the precondition that working directory must be clean.
746 """ enforce the precondition that working directory must be clean.
747
747
748 'merge' can be set to false if a pending uncommitted merge should be
748 'merge' can be set to false if a pending uncommitted merge should be
749 ignored (such as when 'update --check' runs).
749 ignored (such as when 'update --check' runs).
750
750
751 'hint' is the usual hint given to Abort exception.
751 'hint' is the usual hint given to Abort exception.
752 """
752 """
753
753
754 if merge and repo.dirstate.p2() != nullid:
754 if merge and repo.dirstate.p2() != nullid:
755 raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
755 raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
756 modified, added, removed, deleted = repo.status()[:4]
756 modified, added, removed, deleted = repo.status()[:4]
757 if modified or added or removed or deleted:
757 if modified or added or removed or deleted:
758 raise error.Abort(_('uncommitted changes'), hint=hint)
758 raise error.Abort(_('uncommitted changes'), hint=hint)
759 ctx = repo[None]
759 ctx = repo[None]
760 for s in sorted(ctx.substate):
760 for s in sorted(ctx.substate):
761 ctx.sub(s).bailifchanged(hint=hint)
761 ctx.sub(s).bailifchanged(hint=hint)
762
762
763 def logmessage(ui, opts):
763 def logmessage(ui, opts):
764 """ get the log message according to -m and -l option """
764 """ get the log message according to -m and -l option """
765 message = opts.get('message')
765 message = opts.get('message')
766 logfile = opts.get('logfile')
766 logfile = opts.get('logfile')
767
767
768 if message and logfile:
768 if message and logfile:
769 raise error.Abort(_('options --message and --logfile are mutually '
769 raise error.Abort(_('options --message and --logfile are mutually '
770 'exclusive'))
770 'exclusive'))
771 if not message and logfile:
771 if not message and logfile:
772 try:
772 try:
773 if isstdiofilename(logfile):
773 if isstdiofilename(logfile):
774 message = ui.fin.read()
774 message = ui.fin.read()
775 else:
775 else:
776 message = '\n'.join(util.readfile(logfile).splitlines())
776 message = '\n'.join(util.readfile(logfile).splitlines())
777 except IOError as inst:
777 except IOError as inst:
778 raise error.Abort(_("can't read commit message '%s': %s") %
778 raise error.Abort(_("can't read commit message '%s': %s") %
779 (logfile, encoding.strtolocal(inst.strerror)))
779 (logfile, encoding.strtolocal(inst.strerror)))
780 return message
780 return message
781
781
782 def mergeeditform(ctxorbool, baseformname):
782 def mergeeditform(ctxorbool, baseformname):
783 """return appropriate editform name (referencing a committemplate)
783 """return appropriate editform name (referencing a committemplate)
784
784
785 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
785 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
786 merging is committed.
786 merging is committed.
787
787
788 This returns baseformname with '.merge' appended if it is a merge,
788 This returns baseformname with '.merge' appended if it is a merge,
789 otherwise '.normal' is appended.
789 otherwise '.normal' is appended.
790 """
790 """
791 if isinstance(ctxorbool, bool):
791 if isinstance(ctxorbool, bool):
792 if ctxorbool:
792 if ctxorbool:
793 return baseformname + ".merge"
793 return baseformname + ".merge"
794 elif 1 < len(ctxorbool.parents()):
794 elif 1 < len(ctxorbool.parents()):
795 return baseformname + ".merge"
795 return baseformname + ".merge"
796
796
797 return baseformname + ".normal"
797 return baseformname + ".normal"
798
798
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        # force the interactive editor; finishdesc/extramsg imply MQ usage
        def editor(r, c, s):
            return commitforceeditor(r, c, s, finishdesc=finishdesc,
                                     extramsg=extramsg, editform=editform)
        return editor
    if editform:
        # no forced editing, but thread the editform through
        def editor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return editor
    return commiteditor
829
829
def loglimit(opts):
    """get the log limit according to option -l/--limit

    Returns the limit as a positive int, or None when no limit was
    requested. Aborts on a non-integer or non-positive value.
    """
    raw = opts.get('limit')
    if not raw:
        # absent or empty -> unlimited
        return None
    try:
        value = int(raw)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if value <= 0:
        raise error.Abort(_('limit must be positive'))
    return value
843
843
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand '%'-style format specifiers in 'pat' into a filename.

    Supported specifiers (availability depends on which arguments are
    given): '%%' literal percent, '%b' basename of the repo root, and --
    when 'node' is set -- '%H'/'%h' full/short hex node, '%R'/'%r'
    numeric revision (the latter zero-padded to 'revwidth'), '%m'
    sanitized first line of 'desc'. '%N'/'%n' come from 'total'/'seqno'
    and '%s'/'%d'/'%p' from 'pathname'.

    Raises error.Abort on an unknown specifier.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        # raw string: '\w' is not a valid str escape outside a regex
        'm': lambda: re.sub(r'[^\w]', '_', str(desc))
    }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        if node:
            # merged the formerly duplicated "if node:" guards: both the
            # basic node specifiers and '%r' need a node to be present
            expander.update(node_expander)
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # zero-pad the sequence number to the width of the total
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i:i + 1]
            if c == '%':
                # consume the specifier character and expand it lazily
                i += 1
                c = pat[i:i + 1]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
889
889
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    # an empty/None pattern or the conventional '-' means a std stream
    if not pat:
        return True
    return pat == '-'
893
893
894 class _unclosablefile(object):
894 class _unclosablefile(object):
895 def __init__(self, fp):
895 def __init__(self, fp):
896 self._fp = fp
896 self._fp = fp
897
897
898 def close(self):
898 def close(self):
899 pass
899 pass
900
900
901 def __iter__(self):
901 def __iter__(self):
902 return iter(self._fp)
902 return iter(self._fp)
903
903
904 def __getattr__(self, attr):
904 def __getattr__(self, attr):
905 return getattr(self._fp, attr)
905 return getattr(self._fp, attr)
906
906
907 def __enter__(self):
907 def __enter__(self):
908 return self
908 return self
909
909
910 def __exit__(self, exc_type, exc_value, exc_tb):
910 def __exit__(self, exc_type, exc_value, exc_tb):
911 pass
911 pass
912
912
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open the output/input target described by 'pat'.

    An empty pat or '-' yields an unclosable wrapper around the ui's
    stdout (for writable modes) or stdin. Otherwise 'pat' is expanded
    via makefilename() and opened; 'modemap', when given, supplies a
    per-filename mode and is updated so that repeated writers append.
    """
    writable = mode not in ('r', 'rb')

    if isstdiofilename(pat):
        stream = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(stream)
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # first writer truncates; subsequent writers append
            modemap[fn] = 'ab'
    return open(fn, mode)
931
931
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog

    'opts' must carry the boolean-ish 'changelog' and 'manifest' keys
    and a 'dir' key selecting a treemanifest directory log; 'file_' is
    either a tracked file (filelog) or a path to a raw '.i'/'.d' revlog
    on disk. Raises error.Abort on conflicting/insufficient selectors.
    """
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    # validate the mutually exclusive selector flags before touching repo
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            # unfiltered: low-level revlog access should see hidden revs too
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            # only use the dirlog if it actually has revisions
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            # an empty filelog means the file is not tracked in-store;
            # fall through to the raw on-disk revlog path below
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        # open the revlog directly from the filesystem, bypassing the
        # repo store; strip the 2-char extension and use the '.i' index
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
976
976
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) files matching 'pats' to the
    last element of 'pats' (the destination), recording the operation
    in the dirstate. Returns True if any individual copy failed.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget -> abssrc, used to detect colliding targets
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples,
        # warning about (and skipping) unmanaged/removed exact matches
        srcs = []
        if after:
            # --after may legitimately reference already-removed sources
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform (or record) one copy/move; returns True on failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # case-only rename on a case-insensitive filesystem
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            # --after: only record the operation; the target must exist
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temp name so the
                    # filesystem actually changes the case
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    # source vanished: note it but keep going
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, encoding.strtolocal(inst.strerror)))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                repo.wvfs.unlinkpath(abssrc)
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist at dest:
                    # the strip length producing more hits wins
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    # the last pattern is the destination; the rest are sources
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
1222
1222
## facility to let extension process additional data into an import patch
# lists of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mappings from identifier to actual import function
#
# 'preimport' hooks are run before the commit is made and are provided the
# following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of the in-memory commit and more. Feel free to rework the code to
# get there.
extrapreimportmap = {}
# 'postimport' hooks are run after the commit is made and are provided the
# following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1243
1243
1244 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
1244 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
1245 """Utility function used by commands.import to import a single patch
1245 """Utility function used by commands.import to import a single patch
1246
1246
1247 This function is explicitly defined here to help the evolve extension to
1247 This function is explicitly defined here to help the evolve extension to
1248 wrap this part of the import logic.
1248 wrap this part of the import logic.
1249
1249
1250 The API is currently a bit ugly because it a simple code translation from
1250 The API is currently a bit ugly because it a simple code translation from
1251 the import command. Feel free to make it better.
1251 the import command. Feel free to make it better.
1252
1252
1253 :hunk: a patch (as a binary string)
1253 :hunk: a patch (as a binary string)
1254 :parents: nodes that will be parent of the created commit
1254 :parents: nodes that will be parent of the created commit
1255 :opts: the full dict of option passed to the import command
1255 :opts: the full dict of option passed to the import command
1256 :msgs: list to save commit message to.
1256 :msgs: list to save commit message to.
1257 (used in case we need to save it when failing)
1257 (used in case we need to save it when failing)
1258 :updatefunc: a function that update a repo to a given node
1258 :updatefunc: a function that update a repo to a given node
1259 updatefunc(<repo>, <node>)
1259 updatefunc(<repo>, <node>)
1260 """
1260 """
1261 # avoid cycle context -> subrepo -> cmdutil
1261 # avoid cycle context -> subrepo -> cmdutil
1262 from . import context
1262 from . import context
1263 extractdata = patch.extract(ui, hunk)
1263 extractdata = patch.extract(ui, hunk)
1264 tmpname = extractdata.get('filename')
1264 tmpname = extractdata.get('filename')
1265 message = extractdata.get('message')
1265 message = extractdata.get('message')
1266 user = opts.get('user') or extractdata.get('user')
1266 user = opts.get('user') or extractdata.get('user')
1267 date = opts.get('date') or extractdata.get('date')
1267 date = opts.get('date') or extractdata.get('date')
1268 branch = extractdata.get('branch')
1268 branch = extractdata.get('branch')
1269 nodeid = extractdata.get('nodeid')
1269 nodeid = extractdata.get('nodeid')
1270 p1 = extractdata.get('p1')
1270 p1 = extractdata.get('p1')
1271 p2 = extractdata.get('p2')
1271 p2 = extractdata.get('p2')
1272
1272
1273 nocommit = opts.get('no_commit')
1273 nocommit = opts.get('no_commit')
1274 importbranch = opts.get('import_branch')
1274 importbranch = opts.get('import_branch')
1275 update = not opts.get('bypass')
1275 update = not opts.get('bypass')
1276 strip = opts["strip"]
1276 strip = opts["strip"]
1277 prefix = opts["prefix"]
1277 prefix = opts["prefix"]
1278 sim = float(opts.get('similarity') or 0)
1278 sim = float(opts.get('similarity') or 0)
1279 if not tmpname:
1279 if not tmpname:
1280 return (None, None, False)
1280 return (None, None, False)
1281
1281
1282 rejects = False
1282 rejects = False
1283
1283
1284 try:
1284 try:
1285 cmdline_message = logmessage(ui, opts)
1285 cmdline_message = logmessage(ui, opts)
1286 if cmdline_message:
1286 if cmdline_message:
1287 # pickup the cmdline msg
1287 # pickup the cmdline msg
1288 message = cmdline_message
1288 message = cmdline_message
1289 elif message:
1289 elif message:
1290 # pickup the patch msg
1290 # pickup the patch msg
1291 message = message.strip()
1291 message = message.strip()
1292 else:
1292 else:
1293 # launch the editor
1293 # launch the editor
1294 message = None
1294 message = None
1295 ui.debug('message:\n%s\n' % message)
1295 ui.debug('message:\n%s\n' % message)
1296
1296
1297 if len(parents) == 1:
1297 if len(parents) == 1:
1298 parents.append(repo[nullid])
1298 parents.append(repo[nullid])
1299 if opts.get('exact'):
1299 if opts.get('exact'):
1300 if not nodeid or not p1:
1300 if not nodeid or not p1:
1301 raise error.Abort(_('not a Mercurial patch'))
1301 raise error.Abort(_('not a Mercurial patch'))
1302 p1 = repo[p1]
1302 p1 = repo[p1]
1303 p2 = repo[p2 or nullid]
1303 p2 = repo[p2 or nullid]
1304 elif p2:
1304 elif p2:
1305 try:
1305 try:
1306 p1 = repo[p1]
1306 p1 = repo[p1]
1307 p2 = repo[p2]
1307 p2 = repo[p2]
1308 # Without any options, consider p2 only if the
1308 # Without any options, consider p2 only if the
1309 # patch is being applied on top of the recorded
1309 # patch is being applied on top of the recorded
1310 # first parent.
1310 # first parent.
1311 if p1 != parents[0]:
1311 if p1 != parents[0]:
1312 p1 = parents[0]
1312 p1 = parents[0]
1313 p2 = repo[nullid]
1313 p2 = repo[nullid]
1314 except error.RepoError:
1314 except error.RepoError:
1315 p1, p2 = parents
1315 p1, p2 = parents
1316 if p2.node() == nullid:
1316 if p2.node() == nullid:
1317 ui.warn(_("warning: import the patch as a normal revision\n"
1317 ui.warn(_("warning: import the patch as a normal revision\n"
1318 "(use --exact to import the patch as a merge)\n"))
1318 "(use --exact to import the patch as a merge)\n"))
1319 else:
1319 else:
1320 p1, p2 = parents
1320 p1, p2 = parents
1321
1321
1322 n = None
1322 n = None
1323 if update:
1323 if update:
1324 if p1 != parents[0]:
1324 if p1 != parents[0]:
1325 updatefunc(repo, p1.node())
1325 updatefunc(repo, p1.node())
1326 if p2 != parents[1]:
1326 if p2 != parents[1]:
1327 repo.setparents(p1.node(), p2.node())
1327 repo.setparents(p1.node(), p2.node())
1328
1328
1329 if opts.get('exact') or importbranch:
1329 if opts.get('exact') or importbranch:
1330 repo.dirstate.setbranch(branch or 'default')
1330 repo.dirstate.setbranch(branch or 'default')
1331
1331
1332 partial = opts.get('partial', False)
1332 partial = opts.get('partial', False)
1333 files = set()
1333 files = set()
1334 try:
1334 try:
1335 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
1335 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
1336 files=files, eolmode=None, similarity=sim / 100.0)
1336 files=files, eolmode=None, similarity=sim / 100.0)
1337 except error.PatchError as e:
1337 except error.PatchError as e:
1338 if not partial:
1338 if not partial:
1339 raise error.Abort(str(e))
1339 raise error.Abort(str(e))
1340 if partial:
1340 if partial:
1341 rejects = True
1341 rejects = True
1342
1342
1343 files = list(files)
1343 files = list(files)
1344 if nocommit:
1344 if nocommit:
1345 if message:
1345 if message:
1346 msgs.append(message)
1346 msgs.append(message)
1347 else:
1347 else:
1348 if opts.get('exact') or p2:
1348 if opts.get('exact') or p2:
1349 # If you got here, you either use --force and know what
1349 # If you got here, you either use --force and know what
1350 # you are doing or used --exact or a merge patch while
1350 # you are doing or used --exact or a merge patch while
1351 # being updated to its first parent.
1351 # being updated to its first parent.
1352 m = None
1352 m = None
1353 else:
1353 else:
1354 m = scmutil.matchfiles(repo, files or [])
1354 m = scmutil.matchfiles(repo, files or [])
1355 editform = mergeeditform(repo[None], 'import.normal')
1355 editform = mergeeditform(repo[None], 'import.normal')
1356 if opts.get('exact'):
1356 if opts.get('exact'):
1357 editor = None
1357 editor = None
1358 else:
1358 else:
1359 editor = getcommiteditor(editform=editform, **opts)
1359 editor = getcommiteditor(editform=editform, **opts)
1360 extra = {}
1360 extra = {}
1361 for idfunc in extrapreimport:
1361 for idfunc in extrapreimport:
1362 extrapreimportmap[idfunc](repo, extractdata, extra, opts)
1362 extrapreimportmap[idfunc](repo, extractdata, extra, opts)
1363 overrides = {}
1363 overrides = {}
1364 if partial:
1364 if partial:
1365 overrides[('ui', 'allowemptycommit')] = True
1365 overrides[('ui', 'allowemptycommit')] = True
1366 with repo.ui.configoverride(overrides, 'import'):
1366 with repo.ui.configoverride(overrides, 'import'):
1367 n = repo.commit(message, user,
1367 n = repo.commit(message, user,
1368 date, match=m,
1368 date, match=m,
1369 editor=editor, extra=extra)
1369 editor=editor, extra=extra)
1370 for idfunc in extrapostimport:
1370 for idfunc in extrapostimport:
1371 extrapostimportmap[idfunc](repo[n])
1371 extrapostimportmap[idfunc](repo[n])
1372 else:
1372 else:
1373 if opts.get('exact') or importbranch:
1373 if opts.get('exact') or importbranch:
1374 branch = branch or 'default'
1374 branch = branch or 'default'
1375 else:
1375 else:
1376 branch = p1.branch()
1376 branch = p1.branch()
1377 store = patch.filestore()
1377 store = patch.filestore()
1378 try:
1378 try:
1379 files = set()
1379 files = set()
1380 try:
1380 try:
1381 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
1381 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
1382 files, eolmode=None)
1382 files, eolmode=None)
1383 except error.PatchError as e:
1383 except error.PatchError as e:
1384 raise error.Abort(str(e))
1384 raise error.Abort(str(e))
1385 if opts.get('exact'):
1385 if opts.get('exact'):
1386 editor = None
1386 editor = None
1387 else:
1387 else:
1388 editor = getcommiteditor(editform='import.bypass')
1388 editor = getcommiteditor(editform='import.bypass')
1389 memctx = context.memctx(repo, (p1.node(), p2.node()),
1389 memctx = context.memctx(repo, (p1.node(), p2.node()),
1390 message,
1390 message,
1391 files=files,
1391 files=files,
1392 filectxfn=store,
1392 filectxfn=store,
1393 user=user,
1393 user=user,
1394 date=date,
1394 date=date,
1395 branch=branch,
1395 branch=branch,
1396 editor=editor)
1396 editor=editor)
1397 n = memctx.commit()
1397 n = memctx.commit()
1398 finally:
1398 finally:
1399 store.close()
1399 store.close()
1400 if opts.get('exact') and nocommit:
1400 if opts.get('exact') and nocommit:
1401 # --exact with --no-commit is still useful in that it does merge
1401 # --exact with --no-commit is still useful in that it does merge
1402 # and branch bits
1402 # and branch bits
1403 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1403 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1404 elif opts.get('exact') and hex(n) != nodeid:
1404 elif opts.get('exact') and hex(n) != nodeid:
1405 raise error.Abort(_('patch is damaged or loses information'))
1405 raise error.Abort(_('patch is damaged or loses information'))
1406 msg = _('applied to working directory')
1406 msg = _('applied to working directory')
1407 if n:
1407 if n:
1408 # i18n: refers to a short changeset id
1408 # i18n: refers to a short changeset id
1409 msg = _('created %s') % short(n)
1409 msg = _('created %s') % short(n)
1410 return (msg, n, rejects)
1410 return (msg, n, rejects)
1411 finally:
1411 finally:
1412 os.unlink(tmpname)
1412 os.unlink(tmpname)
1413
1413
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1421
1421
def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
    '''Emit one changeset as an "HG changeset patch" through write().

    Writes the patch header (user, date, optional branch, node id,
    parent(s), and any extension-supplied header lines registered in
    extraexport/extraexportmap), then the commit description, then the
    diff of ctx against the selected parent.
    '''
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        # diff against the second parent instead of the first
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    write("# HG changeset patch\n")
    write("# User %s\n" % ctx.user())
    write("# Date %d %d\n" % ctx.date())
    write("# %s\n" % util.datestr(ctx.date()))
    if branch and branch != 'default':
        # the default branch is implicit; only record others
        write("# Branch %s\n" % branch)
    write("# Node ID %s\n" % hex(node))
    write("# Parent  %s\n" % hex(prev))
    if len(parents) > 1:
        write("# Parent  %s\n" % hex(parents[1]))

    # let extensions append their own "# ..." header lines
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            write('# %s\n' % header)
    write(ctx.description().rstrip())
    write("\n\n")

    for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
        write(chunk, label=label)
1454
1454
def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      fntemplate: An optional string to use for generating patch file names.
      fp: An optional file-like object to which patches should be written.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fp is specified: All revs are written to the specified
                         file-like object.
        fntemplate specified: Each rev is written to a unique file named using
                            the given template.
        Neither fp nor template specified: All revs written to repo.ui.write()
    '''

    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    filemode = {}

    # pick the destination-specific write callable; when fntemplate is
    # used it is rebound per revision inside the loop below
    write = None
    dest = '<unnamed>'
    if fp:
        dest = getattr(fp, 'name', dest)
        def write(s, **kw):
            fp.write(s)
    elif not fntemplate:
        write = repo.ui.write

    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        fo = None
        if not fp and fntemplate:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
                             total=total, seqno=seqno, revwidth=revwidth,
                             mode='wb', modemap=filemode)
            dest = fo.name
            def write(s, **kw):
                fo.write(s)
        if not dest.startswith('<'):
            repo.ui.note("%s\n" % dest)
        _exportsingle(
            repo, ctx, match, switch_parent, rev, seqno, write, opts)
        if fo is not None:
            fo.close()
1513
1513
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.'''
    # write to fp when given, otherwise to the ui
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat wants zero lines of context
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1570
1570
1571 def _changesetlabels(ctx):
1571 def _changesetlabels(ctx):
1572 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1572 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1573 if ctx.obsolete():
1573 if ctx.obsolete():
1574 labels.append('changeset.obsolete')
1574 labels.append('changeset.obsolete')
1575 if ctx.isunstable():
1575 if ctx.isunstable():
1576 labels.append('changeset.unstable')
1576 labels.append('changeset.unstable')
1577 for instability in ctx.instabilities():
1577 for instability in ctx.instabilities():
1578 labels.append('instability.%s' % instability)
1578 labels.append('instability.%s' % instability)
1579 return ' '.join(labels)
1579 return ' '.join(labels)
1580
1580
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # per-revision buffered output, keyed by rev (see show()/flush())
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        '''Write any buffered header/hunk for ctx.

        Returns 1 if a hunk was written, 0 otherwise.  A header equal to
        the previously written one is suppressed.
        '''
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # emit the accumulated footer, if any
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        props = pycompat.byteskwargs(props)
        if self.buffered:
            # capture the output so flush() can emit it later in order
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
                          label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset:   %s\n") % scmutil.formatchangeid(ctx),
                      label=_changesetlabels(ctx))

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch:      %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase:       %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent:      %s\n") % scmutil.formatchangeid(pctx),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            mrev = self.repo.manifestlog._revlog.rev(mnode)
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest:    %s\n")
                          % scmutil.formatrevnode(self.ui, mrev, mnode),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user:        %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date:        %s\n") % date,
                      label='log.date')

        if ctx.isunstable():
            # i18n: column positioning for "hg log"
            instabilities = ctx.instabilities()
            self.ui.write(_("instability: %s\n") % ', '.join(instabilities),
                          label='log.instability')

        self._exthook(ctx)

        if self.ui.debugflag:
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files:       %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies:      %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra:       %s=%s\n")
                              % (key, util.escapestr(value)),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary:     %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def _exthook(self, ctx):
        '''empty method used by extension as a hook point
        '''
        pass

    def showpatch(self, ctx, matchfn):
        # matchfn falls back to the one given at construction time
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")
1758
1758
1759 class jsonchangeset(changeset_printer):
1759 class jsonchangeset(changeset_printer):
1760 '''format changeset information.'''
1760 '''format changeset information.'''
1761
1761
1762 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1762 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1763 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1763 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1764 self.cache = {}
1764 self.cache = {}
1765 self._first = True
1765 self._first = True
1766
1766
1767 def close(self):
1767 def close(self):
1768 if not self._first:
1768 if not self._first:
1769 self.ui.write("\n]\n")
1769 self.ui.write("\n]\n")
1770 else:
1770 else:
1771 self.ui.write("[]\n")
1771 self.ui.write("[]\n")
1772
1772
1773 def _show(self, ctx, copies, matchfn, props):
1773 def _show(self, ctx, copies, matchfn, props):
1774 '''show a single changeset or file revision'''
1774 '''show a single changeset or file revision'''
1775 rev = ctx.rev()
1775 rev = ctx.rev()
1776 if rev is None:
1776 if rev is None:
1777 jrev = jnode = 'null'
1777 jrev = jnode = 'null'
1778 else:
1778 else:
1779 jrev = '%d' % rev
1779 jrev = '%d' % rev
1780 jnode = '"%s"' % hex(ctx.node())
1780 jnode = '"%s"' % hex(ctx.node())
1781 j = encoding.jsonescape
1781 j = encoding.jsonescape
1782
1782
1783 if self._first:
1783 if self._first:
1784 self.ui.write("[\n {")
1784 self.ui.write("[\n {")
1785 self._first = False
1785 self._first = False
1786 else:
1786 else:
1787 self.ui.write(",\n {")
1787 self.ui.write(",\n {")
1788
1788
1789 if self.ui.quiet:
1789 if self.ui.quiet:
1790 self.ui.write(('\n "rev": %s') % jrev)
1790 self.ui.write(('\n "rev": %s') % jrev)
1791 self.ui.write((',\n "node": %s') % jnode)
1791 self.ui.write((',\n "node": %s') % jnode)
1792 self.ui.write('\n }')
1792 self.ui.write('\n }')
1793 return
1793 return
1794
1794
1795 self.ui.write(('\n "rev": %s') % jrev)
1795 self.ui.write(('\n "rev": %s') % jrev)
1796 self.ui.write((',\n "node": %s') % jnode)
1796 self.ui.write((',\n "node": %s') % jnode)
1797 self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
1797 self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
1798 self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
1798 self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
1799 self.ui.write((',\n "user": "%s"') % j(ctx.user()))
1799 self.ui.write((',\n "user": "%s"') % j(ctx.user()))
1800 self.ui.write((',\n "date": [%d, %d]') % ctx.date())
1800 self.ui.write((',\n "date": [%d, %d]') % ctx.date())
1801 self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
1801 self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
1802
1802
1803 self.ui.write((',\n "bookmarks": [%s]') %
1803 self.ui.write((',\n "bookmarks": [%s]') %
1804 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1804 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1805 self.ui.write((',\n "tags": [%s]') %
1805 self.ui.write((',\n "tags": [%s]') %
1806 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1806 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1807 self.ui.write((',\n "parents": [%s]') %
1807 self.ui.write((',\n "parents": [%s]') %
1808 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1808 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1809
1809
1810 if self.ui.debugflag:
1810 if self.ui.debugflag:
1811 if rev is None:
1811 if rev is None:
1812 jmanifestnode = 'null'
1812 jmanifestnode = 'null'
1813 else:
1813 else:
1814 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1814 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1815 self.ui.write((',\n "manifest": %s') % jmanifestnode)
1815 self.ui.write((',\n "manifest": %s') % jmanifestnode)
1816
1816
1817 self.ui.write((',\n "extra": {%s}') %
1817 self.ui.write((',\n "extra": {%s}') %
1818 ", ".join('"%s": "%s"' % (j(k), j(v))
1818 ", ".join('"%s": "%s"' % (j(k), j(v))
1819 for k, v in ctx.extra().items()))
1819 for k, v in ctx.extra().items()))
1820
1820
1821 files = ctx.p1().status(ctx)
1821 files = ctx.p1().status(ctx)
1822 self.ui.write((',\n "modified": [%s]') %
1822 self.ui.write((',\n "modified": [%s]') %
1823 ", ".join('"%s"' % j(f) for f in files[0]))
1823 ", ".join('"%s"' % j(f) for f in files[0]))
1824 self.ui.write((',\n "added": [%s]') %
1824 self.ui.write((',\n "added": [%s]') %
1825 ", ".join('"%s"' % j(f) for f in files[1]))
1825 ", ".join('"%s"' % j(f) for f in files[1]))
1826 self.ui.write((',\n "removed": [%s]') %
1826 self.ui.write((',\n "removed": [%s]') %
1827 ", ".join('"%s"' % j(f) for f in files[2]))
1827 ", ".join('"%s"' % j(f) for f in files[2]))
1828
1828
1829 elif self.ui.verbose:
1829 elif self.ui.verbose:
1830 self.ui.write((',\n "files": [%s]') %
1830 self.ui.write((',\n "files": [%s]') %
1831 ", ".join('"%s"' % j(f) for f in ctx.files()))
1831 ", ".join('"%s"' % j(f) for f in ctx.files()))
1832
1832
1833 if copies:
1833 if copies:
1834 self.ui.write((',\n "copies": {%s}') %
1834 self.ui.write((',\n "copies": {%s}') %
1835 ", ".join('"%s": "%s"' % (j(k), j(v))
1835 ", ".join('"%s": "%s"' % (j(k), j(v))
1836 for k, v in copies))
1836 for k, v in copies))
1837
1837
1838 matchfn = self.matchfn
1838 matchfn = self.matchfn
1839 if matchfn:
1839 if matchfn:
1840 stat = self.diffopts.get('stat')
1840 stat = self.diffopts.get('stat')
1841 diff = self.diffopts.get('patch')
1841 diff = self.diffopts.get('patch')
1842 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1842 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1843 node, prev = ctx.node(), ctx.p1().node()
1843 node, prev = ctx.node(), ctx.p1().node()
1844 if stat:
1844 if stat:
1845 self.ui.pushbuffer()
1845 self.ui.pushbuffer()
1846 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1846 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1847 match=matchfn, stat=True)
1847 match=matchfn, stat=True)
1848 self.ui.write((',\n "diffstat": "%s"')
1848 self.ui.write((',\n "diffstat": "%s"')
1849 % j(self.ui.popbuffer()))
1849 % j(self.ui.popbuffer()))
1850 if diff:
1850 if diff:
1851 self.ui.pushbuffer()
1851 self.ui.pushbuffer()
1852 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1852 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1853 match=matchfn, stat=False)
1853 match=matchfn, stat=False)
1854 self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
1854 self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
1855
1855
1856 self.ui.write("\n }")
1856 self.ui.write("\n }")
1857
1857
class changeset_templater(changeset_printer):
    '''format changeset information.

    Renders each changeset through a user-supplied template spec
    (literal template or style map file) instead of the fixed layout
    of changeset_printer.
    '''

    # Arguments before "buffered" used to be positional. Consider not
    # adding/removing arguments before "buffered" to not break callers.
    def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
                 buffered=False):
        diffopts = diffopts or {}

        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.t = formatter.loadtemplater(ui, tmplspec,
                                         cache=templatekw.defaulttempl)
        # monotonically increasing index of the changeset being shown,
        # used by the per-item 'separator' part
        self._counter = itertools.count()
        self.cache = {}

        self._tref = tmplspec.ref
        # mapping of part name -> template name actually used for it;
        # empty string means "no template for this part"
        self._parts = {'header': '', 'footer': '',
                       tmplspec.ref: tmplspec.ref,
                       'docheader': '', 'docfooter': '',
                       'separator': ''}
        if tmplspec.mapfile:
            # find correct templates for current mode, for backward
            # compatibility with 'log -v/-q/--debug' using a mapfile
            tmplmodes = [
                (True, ''),
                (self.ui.verbose, '_verbose'),
                (self.ui.quiet, '_quiet'),
                (self.ui.debugflag, '_debug'),
            ]
            # later (more specific) modes override earlier ones because
            # the list is iterated in order
            for mode, postfix in tmplmodes:
                for t in self._parts:
                    cur = t + postfix
                    if mode and cur in self.t:
                        self._parts[t] = cur
        else:
            partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
            m = formatter.templatepartsmap(tmplspec, self.t, partnames)
            self._parts.update(m)

        # docheader is emitted once, immediately, before any changeset
        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        # append the one-time docfooter before the base class flushes
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['index'] = index = next(self._counter)
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache
        props = pycompat.strkwargs(props)

        # write separator, which wouldn't work well with the header part below
        # since there's inherently a conflict between header (across items) and
        # separator (per item)
        if self._parts['separator'] and index > 0:
            self.ui.write(templater.stringify(self.t(self._parts['separator'])))

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                # only emit the header when it changes between changesets
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts[self._tref]
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1945
def logtemplatespec(tmpl, mapfile):
    """Build a template spec for changeset rendering.

    A spec backed by a map file uses the 'changeset' reference; a
    literal template gets an anonymous spec with no map file.
    """
    if not mapfile:
        return formatter.templatespec('', tmpl, None)
    return formatter.templatespec('changeset', tmpl, mapfile)
1951
1951
def _lookuplogtemplate(ui, tmpl, style):
    """Find the template matching the given template spec or style

    See formatter.lookuptemplate() for details.
    """
    # neither given explicitly: fall back to the ui configuration,
    # where 'logtemplate' beats 'style'
    if not tmpl and not style:
        configtmpl = ui.config('ui', 'logtemplate')
        if configtmpl:
            return logtemplatespec(templater.unquotestring(configtmpl), None)
        style = util.expandpath(ui.config('ui', 'style'))

    if not tmpl:
        if not style:
            # nothing requested at all
            return logtemplatespec(None, None)
        mapfile = style
        # a bare style name is resolved against the shipped
        # map-cmdline.* files before being used as a path
        if not os.path.split(mapfile)[0]:
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname
        return logtemplatespec(None, mapfile)

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1979
1979
def makelogtemplater(ui, repo, tmpl, buffered=False):
    """Create a changeset_templater from a literal template 'tmpl'"""
    return changeset_templater(ui, repo, logtemplatespec(tmpl, None),
                               buffered=buffered)
1984
1984
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # --patch/--stat need a matcher to decide which file diffs to show
    if opts.get('patch') or opts.get('stat'):
        match = scmutil.matchall(repo)
    else:
        match = None

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, match, opts, buffered)

    spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))

    if spec.ref or spec.tmpl or spec.mapfile:
        return changeset_templater(ui, repo, spec, match, opts, buffered)

    # no template information anywhere: plain display
    return changeset_printer(ui, repo, match, opts, buffered)
2010
2010
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('prednode', '%s ', hex(marker.prednode()))
    successors = marker.succnodes()
    succlist = fm.formatlist(map(hex, successors), name='node')
    fm.condwrite(successors, 'succnodes', '%s ', succlist)
    fm.write('flag', '%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        parentlist = fm.formatlist(map(hex, parentnodes), name='node',
                                   sep=', ')
        fm.write('parentnodes', '{%s} ', parentlist)
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # 'date' is shown separately above, so drop it from the metadata dump
    metadata = marker.metadata().copy()
    metadata.pop('date', None)
    fm.write('metadata', '{%s}',
             fm.formatdict(metadata, fmt='%r: %r', sep=', '))
    fm.plain('\n')
2031
2031
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    matches = {}

    # collect the date of every walked changeset that satisfies the spec
    def collect(ctx, fns):
        when = ctx.date()
        if datematch(when[0]):
            matches[ctx.rev()] = when

    # walkchangerevs yields tipmost-first, so the first hit wins
    for ctx in walkchangerevs(repo, matcher, {'rev': None}, collect):
        rev = ctx.rev()
        if rev not in matches:
            continue
        ui.status(_("found revision %s from %s\n") %
                  (rev, util.datestr(matches[rev])))
        return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
2052
2052
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double until reaching sizelimit.

    Emits windowsize, then keeps doubling it; once the yielded value
    is at least sizelimit, that value is repeated forever.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
2058
2058
class FileWalkError(Exception):
    """Raised when file history cannot be walked using filelogs alone."""
2061
2061
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    As a side effect, fncache is filled with {rev: [filename, ...]}
    entries for the revisions added to the returned set.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) pairs; with --follow the
        # node pins which file revision's history to trace, and rename
        # sources collected in 'copies' are appended as they are found
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
2158
2158
class _followfilter(object):
    # Incremental filter deciding whether each revision presented to
    # match() belongs to the follow graph rooted at the first revision
    # seen. Revisions must be fed monotonically (all ascending or all
    # descending relative to the start) for the roots tracking to work.
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        # set lazily by the first match() call
        self.startrev = nullrev
        # frontier of known-related revisions
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # with --follow-first only the first parent counts
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        # first revision seen becomes the start point and always matches
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
2196
2196
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # patterns (or --removed with exact/prefix matches) cannot be
    # resolved through filelogs alone
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    # candidate rev: test it once, cache the outcome, and
                    # record its matching filenames in fncache on success
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): 'set - list' raises TypeError for a plain
                # set; this looks reachable whenever --prune matches on a
                # non-slowpath walk — confirm intended semantics upstream
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            # gather the next window of wanted revisions
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # first pass in ascending order: let prepare() collect data
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # second pass in the caller-requested order: yield contexts
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2334
2334
2335 def _makefollowlogfilematcher(repo, files, followfirst):
2335 def _makefollowlogfilematcher(repo, files, followfirst):
2336 # When displaying a revision with --patch --follow FILE, we have
2336 # When displaying a revision with --patch --follow FILE, we have
2337 # to know which file of the revision must be diffed. With
2337 # to know which file of the revision must be diffed. With
2338 # --follow, we want the names of the ancestors of FILE in the
2338 # --follow, we want the names of the ancestors of FILE in the
2339 # revision, stored in "fcache". "fcache" is populated by
2339 # revision, stored in "fcache". "fcache" is populated by
2340 # reproducing the graph traversal already done by --follow revset
2340 # reproducing the graph traversal already done by --follow revset
2341 # and relating revs to file names (which is not "correct" but
2341 # and relating revs to file names (which is not "correct" but
2342 # good enough).
2342 # good enough).
2343 fcache = {}
2343 fcache = {}
2344 fcacheready = [False]
2344 fcacheready = [False]
2345 pctx = repo['.']
2345 pctx = repo['.']
2346
2346
2347 def populate():
2347 def populate():
2348 for fn in files:
2348 for fn in files:
2349 fctx = pctx[fn]
2349 fctx = pctx[fn]
2350 fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
2350 fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
2351 for c in fctx.ancestors(followfirst=followfirst):
2351 for c in fctx.ancestors(followfirst=followfirst):
2352 fcache.setdefault(c.rev(), set()).add(c.path())
2352 fcache.setdefault(c.rev(), set()).add(c.path())
2353
2353
2354 def filematcher(rev):
2354 def filematcher(rev):
2355 if not fcacheready[0]:
2355 if not fcacheready[0]:
2356 # Lazy initialization
2356 # Lazy initialization
2357 fcacheready[0] = True
2357 fcacheready[0] = True
2358 populate()
2358 populate()
2359 return scmutil.matchfiles(repo, fcache.get(rev, []))
2359 return scmutil.matchfiles(repo, fcache.get(rev, []))
2360
2360
2361 return filematcher
2361 return filematcher
2362
2362
2363 def _makenofollowlogfilematcher(repo, pats, opts):
2363 def _makenofollowlogfilematcher(repo, pats, opts):
2364 '''hook for extensions to override the filematcher for non-follow cases'''
2364 '''hook for extensions to override the filematcher for non-follow cases'''
2365 return None
2365 return None
2366
2366
2367 def _makelogrevset(repo, pats, opts, revs):
2367 def _makelogrevset(repo, pats, opts, revs):
2368 """Return (expr, filematcher) where expr is a revset string built
2368 """Return (expr, filematcher) where expr is a revset string built
2369 from log options and file patterns or None. If --stat or --patch
2369 from log options and file patterns or None. If --stat or --patch
2370 are not passed filematcher is None. Otherwise it is a callable
2370 are not passed filematcher is None. Otherwise it is a callable
2371 taking a revision number and returning a match objects filtering
2371 taking a revision number and returning a match objects filtering
2372 the files to be detailed when displaying the revision.
2372 the files to be detailed when displaying the revision.
2373 """
2373 """
2374 opt2revset = {
2374 opt2revset = {
2375 'no_merges': ('not merge()', None),
2375 'no_merges': ('not merge()', None),
2376 'only_merges': ('merge()', None),
2376 'only_merges': ('merge()', None),
2377 '_ancestors': ('ancestors(%(val)s)', None),
2377 '_ancestors': ('ancestors(%(val)s)', None),
2378 '_fancestors': ('_firstancestors(%(val)s)', None),
2378 '_fancestors': ('_firstancestors(%(val)s)', None),
2379 '_descendants': ('descendants(%(val)s)', None),
2379 '_descendants': ('descendants(%(val)s)', None),
2380 '_fdescendants': ('_firstdescendants(%(val)s)', None),
2380 '_fdescendants': ('_firstdescendants(%(val)s)', None),
2381 '_matchfiles': ('_matchfiles(%(val)s)', None),
2381 '_matchfiles': ('_matchfiles(%(val)s)', None),
2382 'date': ('date(%(val)r)', None),
2382 'date': ('date(%(val)r)', None),
2383 'branch': ('branch(%(val)r)', ' or '),
2383 'branch': ('branch(%(val)r)', ' or '),
2384 '_patslog': ('filelog(%(val)r)', ' or '),
2384 '_patslog': ('filelog(%(val)r)', ' or '),
2385 '_patsfollow': ('follow(%(val)r)', ' or '),
2385 '_patsfollow': ('follow(%(val)r)', ' or '),
2386 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
2386 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
2387 'keyword': ('keyword(%(val)r)', ' or '),
2387 'keyword': ('keyword(%(val)r)', ' or '),
2388 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
2388 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
2389 'user': ('user(%(val)r)', ' or '),
2389 'user': ('user(%(val)r)', ' or '),
2390 }
2390 }
2391
2391
2392 opts = dict(opts)
2392 opts = dict(opts)
2393 # follow or not follow?
2393 # follow or not follow?
2394 follow = opts.get('follow') or opts.get('follow_first')
2394 follow = opts.get('follow') or opts.get('follow_first')
2395 if opts.get('follow_first'):
2395 if opts.get('follow_first'):
2396 followfirst = 1
2396 followfirst = 1
2397 else:
2397 else:
2398 followfirst = 0
2398 followfirst = 0
2399 # --follow with FILE behavior depends on revs...
2399 # --follow with FILE behavior depends on revs...
2400 it = iter(revs)
2400 it = iter(revs)
2401 startrev = next(it)
2401 startrev = next(it)
2402 followdescendants = startrev < next(it, startrev)
2402 followdescendants = startrev < next(it, startrev)
2403
2403
2404 # branch and only_branch are really aliases and must be handled at
2404 # branch and only_branch are really aliases and must be handled at
2405 # the same time
2405 # the same time
2406 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
2406 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
2407 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
2407 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
2408 # pats/include/exclude are passed to match.match() directly in
2408 # pats/include/exclude are passed to match.match() directly in
2409 # _matchfiles() revset but walkchangerevs() builds its matcher with
2409 # _matchfiles() revset but walkchangerevs() builds its matcher with
2410 # scmutil.match(). The difference is input pats are globbed on
2410 # scmutil.match(). The difference is input pats are globbed on
2411 # platforms without shell expansion (windows).
2411 # platforms without shell expansion (windows).
2412 wctx = repo[None]
2412 wctx = repo[None]
2413 match, pats = scmutil.matchandpats(wctx, pats, opts)
2413 match, pats = scmutil.matchandpats(wctx, pats, opts)
2414 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2414 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2415 opts.get('removed'))
2415 opts.get('removed'))
2416 if not slowpath:
2416 if not slowpath:
2417 for f in match.files():
2417 for f in match.files():
2418 if follow and f not in wctx:
2418 if follow and f not in wctx:
2419 # If the file exists, it may be a directory, so let it
2419 # If the file exists, it may be a directory, so let it
2420 # take the slow path.
2420 # take the slow path.
2421 if os.path.exists(repo.wjoin(f)):
2421 if os.path.exists(repo.wjoin(f)):
2422 slowpath = True
2422 slowpath = True
2423 continue
2423 continue
2424 else:
2424 else:
2425 raise error.Abort(_('cannot follow file not in parent '
2425 raise error.Abort(_('cannot follow file not in parent '
2426 'revision: "%s"') % f)
2426 'revision: "%s"') % f)
2427 filelog = repo.file(f)
2427 filelog = repo.file(f)
2428 if not filelog:
2428 if not filelog:
2429 # A zero count may be a directory or deleted file, so
2429 # A zero count may be a directory or deleted file, so
2430 # try to find matching entries on the slow path.
2430 # try to find matching entries on the slow path.
2431 if follow:
2431 if follow:
2432 raise error.Abort(
2432 raise error.Abort(
2433 _('cannot follow nonexistent file: "%s"') % f)
2433 _('cannot follow nonexistent file: "%s"') % f)
2434 slowpath = True
2434 slowpath = True
2435
2435
2436 # We decided to fall back to the slowpath because at least one
2436 # We decided to fall back to the slowpath because at least one
2437 # of the paths was not a file. Check to see if at least one of them
2437 # of the paths was not a file. Check to see if at least one of them
2438 # existed in history - in that case, we'll continue down the
2438 # existed in history - in that case, we'll continue down the
2439 # slowpath; otherwise, we can turn off the slowpath
2439 # slowpath; otherwise, we can turn off the slowpath
2440 if slowpath:
2440 if slowpath:
2441 for path in match.files():
2441 for path in match.files():
2442 if path == '.' or path in repo.store:
2442 if path == '.' or path in repo.store:
2443 break
2443 break
2444 else:
2444 else:
2445 slowpath = False
2445 slowpath = False
2446
2446
2447 fpats = ('_patsfollow', '_patsfollowfirst')
2447 fpats = ('_patsfollow', '_patsfollowfirst')
2448 fnopats = (('_ancestors', '_fancestors'),
2448 fnopats = (('_ancestors', '_fancestors'),
2449 ('_descendants', '_fdescendants'))
2449 ('_descendants', '_fdescendants'))
2450 if slowpath:
2450 if slowpath:
2451 # See walkchangerevs() slow path.
2451 # See walkchangerevs() slow path.
2452 #
2452 #
2453 # pats/include/exclude cannot be represented as separate
2453 # pats/include/exclude cannot be represented as separate
2454 # revset expressions as their filtering logic applies at file
2454 # revset expressions as their filtering logic applies at file
2455 # level. For instance "-I a -X a" matches a revision touching
2455 # level. For instance "-I a -X a" matches a revision touching
2456 # "a" and "b" while "file(a) and not file(b)" does
2456 # "a" and "b" while "file(a) and not file(b)" does
2457 # not. Besides, filesets are evaluated against the working
2457 # not. Besides, filesets are evaluated against the working
2458 # directory.
2458 # directory.
2459 matchargs = ['r:', 'd:relpath']
2459 matchargs = ['r:', 'd:relpath']
2460 for p in pats:
2460 for p in pats:
2461 matchargs.append('p:' + p)
2461 matchargs.append('p:' + p)
2462 for p in opts.get('include', []):
2462 for p in opts.get('include', []):
2463 matchargs.append('i:' + p)
2463 matchargs.append('i:' + p)
2464 for p in opts.get('exclude', []):
2464 for p in opts.get('exclude', []):
2465 matchargs.append('x:' + p)
2465 matchargs.append('x:' + p)
2466 matchargs = ','.join(('%r' % p) for p in matchargs)
2466 matchargs = ','.join(('%r' % p) for p in matchargs)
2467 opts['_matchfiles'] = matchargs
2467 opts['_matchfiles'] = matchargs
2468 if follow:
2468 if follow:
2469 opts[fnopats[0][followfirst]] = '.'
2469 opts[fnopats[0][followfirst]] = '.'
2470 else:
2470 else:
2471 if follow:
2471 if follow:
2472 if pats:
2472 if pats:
2473 # follow() revset interprets its file argument as a
2473 # follow() revset interprets its file argument as a
2474 # manifest entry, so use match.files(), not pats.
2474 # manifest entry, so use match.files(), not pats.
2475 opts[fpats[followfirst]] = list(match.files())
2475 opts[fpats[followfirst]] = list(match.files())
2476 else:
2476 else:
2477 op = fnopats[followdescendants][followfirst]
2477 op = fnopats[followdescendants][followfirst]
2478 opts[op] = 'rev(%d)' % startrev
2478 opts[op] = 'rev(%d)' % startrev
2479 else:
2479 else:
2480 opts['_patslog'] = list(pats)
2480 opts['_patslog'] = list(pats)
2481
2481
2482 filematcher = None
2482 filematcher = None
2483 if opts.get('patch') or opts.get('stat'):
2483 if opts.get('patch') or opts.get('stat'):
2484 # When following files, track renames via a special matcher.
2484 # When following files, track renames via a special matcher.
2485 # If we're forced to take the slowpath it means we're following
2485 # If we're forced to take the slowpath it means we're following
2486 # at least one pattern/directory, so don't bother with rename tracking.
2486 # at least one pattern/directory, so don't bother with rename tracking.
2487 if follow and not match.always() and not slowpath:
2487 if follow and not match.always() and not slowpath:
2488 # _makefollowlogfilematcher expects its files argument to be
2488 # _makefollowlogfilematcher expects its files argument to be
2489 # relative to the repo root, so use match.files(), not pats.
2489 # relative to the repo root, so use match.files(), not pats.
2490 filematcher = _makefollowlogfilematcher(repo, match.files(),
2490 filematcher = _makefollowlogfilematcher(repo, match.files(),
2491 followfirst)
2491 followfirst)
2492 else:
2492 else:
2493 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2493 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2494 if filematcher is None:
2494 if filematcher is None:
2495 filematcher = lambda rev: match
2495 filematcher = lambda rev: match
2496
2496
2497 expr = []
2497 expr = []
2498 for op, val in sorted(opts.iteritems()):
2498 for op, val in sorted(opts.iteritems()):
2499 if not val:
2499 if not val:
2500 continue
2500 continue
2501 if op not in opt2revset:
2501 if op not in opt2revset:
2502 continue
2502 continue
2503 revop, andor = opt2revset[op]
2503 revop, andor = opt2revset[op]
2504 if '%(val)' not in revop:
2504 if '%(val)' not in revop:
2505 expr.append(revop)
2505 expr.append(revop)
2506 else:
2506 else:
2507 if not isinstance(val, list):
2507 if not isinstance(val, list):
2508 e = revop % {'val': val}
2508 e = revop % {'val': val}
2509 else:
2509 else:
2510 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2510 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2511 expr.append(e)
2511 expr.append(e)
2512
2512
2513 if expr:
2513 if expr:
2514 expr = '(' + ' and '.join(expr) + ')'
2514 expr = '(' + ' and '.join(expr) + ')'
2515 else:
2515 else:
2516 expr = None
2516 expr = None
2517 return expr, filematcher
2517 return expr, filematcher
2518
2518
2519 def _logrevs(repo, opts):
2519 def _logrevs(repo, opts):
2520 # Default --rev value depends on --follow but --follow behavior
2520 # Default --rev value depends on --follow but --follow behavior
2521 # depends on revisions resolved from --rev...
2521 # depends on revisions resolved from --rev...
2522 follow = opts.get('follow') or opts.get('follow_first')
2522 follow = opts.get('follow') or opts.get('follow_first')
2523 if opts.get('rev'):
2523 if opts.get('rev'):
2524 revs = scmutil.revrange(repo, opts['rev'])
2524 revs = scmutil.revrange(repo, opts['rev'])
2525 elif follow and repo.dirstate.p1() == nullid:
2525 elif follow and repo.dirstate.p1() == nullid:
2526 revs = smartset.baseset()
2526 revs = smartset.baseset()
2527 elif follow:
2527 elif follow:
2528 revs = repo.revs('reverse(:.)')
2528 revs = repo.revs('reverse(:.)')
2529 else:
2529 else:
2530 revs = smartset.spanset(repo)
2530 revs = smartset.spanset(repo)
2531 revs.reverse()
2531 revs.reverse()
2532 return revs
2532 return revs
2533
2533
2534 def getgraphlogrevs(repo, pats, opts):
2534 def getgraphlogrevs(repo, pats, opts):
2535 """Return (revs, expr, filematcher) where revs is an iterable of
2535 """Return (revs, expr, filematcher) where revs is an iterable of
2536 revision numbers, expr is a revset string built from log options
2536 revision numbers, expr is a revset string built from log options
2537 and file patterns or None, and used to filter 'revs'. If --stat or
2537 and file patterns or None, and used to filter 'revs'. If --stat or
2538 --patch are not passed filematcher is None. Otherwise it is a
2538 --patch are not passed filematcher is None. Otherwise it is a
2539 callable taking a revision number and returning a match objects
2539 callable taking a revision number and returning a match objects
2540 filtering the files to be detailed when displaying the revision.
2540 filtering the files to be detailed when displaying the revision.
2541 """
2541 """
2542 limit = loglimit(opts)
2542 limit = loglimit(opts)
2543 revs = _logrevs(repo, opts)
2543 revs = _logrevs(repo, opts)
2544 if not revs:
2544 if not revs:
2545 return smartset.baseset(), None, None
2545 return smartset.baseset(), None, None
2546 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2546 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2547 if opts.get('rev'):
2547 if opts.get('rev'):
2548 # User-specified revs might be unsorted, but don't sort before
2548 # User-specified revs might be unsorted, but don't sort before
2549 # _makelogrevset because it might depend on the order of revs
2549 # _makelogrevset because it might depend on the order of revs
2550 if not (revs.isdescending() or revs.istopo()):
2550 if not (revs.isdescending() or revs.istopo()):
2551 revs.sort(reverse=True)
2551 revs.sort(reverse=True)
2552 if expr:
2552 if expr:
2553 matcher = revset.match(repo.ui, expr)
2553 matcher = revset.match(repo.ui, expr)
2554 revs = matcher(repo, revs)
2554 revs = matcher(repo, revs)
2555 if limit is not None:
2555 if limit is not None:
2556 limitedrevs = []
2556 limitedrevs = []
2557 for idx, rev in enumerate(revs):
2557 for idx, rev in enumerate(revs):
2558 if idx >= limit:
2558 if idx >= limit:
2559 break
2559 break
2560 limitedrevs.append(rev)
2560 limitedrevs.append(rev)
2561 revs = smartset.baseset(limitedrevs)
2561 revs = smartset.baseset(limitedrevs)
2562
2562
2563 return revs, expr, filematcher
2563 return revs, expr, filematcher
2564
2564
2565 def getlogrevs(repo, pats, opts):
2565 def getlogrevs(repo, pats, opts):
2566 """Return (revs, expr, filematcher) where revs is an iterable of
2566 """Return (revs, expr, filematcher) where revs is an iterable of
2567 revision numbers, expr is a revset string built from log options
2567 revision numbers, expr is a revset string built from log options
2568 and file patterns or None, and used to filter 'revs'. If --stat or
2568 and file patterns or None, and used to filter 'revs'. If --stat or
2569 --patch are not passed filematcher is None. Otherwise it is a
2569 --patch are not passed filematcher is None. Otherwise it is a
2570 callable taking a revision number and returning a match objects
2570 callable taking a revision number and returning a match objects
2571 filtering the files to be detailed when displaying the revision.
2571 filtering the files to be detailed when displaying the revision.
2572 """
2572 """
2573 limit = loglimit(opts)
2573 limit = loglimit(opts)
2574 revs = _logrevs(repo, opts)
2574 revs = _logrevs(repo, opts)
2575 if not revs:
2575 if not revs:
2576 return smartset.baseset([]), None, None
2576 return smartset.baseset([]), None, None
2577 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2577 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2578 if expr:
2578 if expr:
2579 matcher = revset.match(repo.ui, expr)
2579 matcher = revset.match(repo.ui, expr)
2580 revs = matcher(repo, revs)
2580 revs = matcher(repo, revs)
2581 if limit is not None:
2581 if limit is not None:
2582 limitedrevs = []
2582 limitedrevs = []
2583 for idx, r in enumerate(revs):
2583 for idx, r in enumerate(revs):
2584 if limit <= idx:
2584 if limit <= idx:
2585 break
2585 break
2586 limitedrevs.append(r)
2586 limitedrevs.append(r)
2587 revs = smartset.baseset(limitedrevs)
2587 revs = smartset.baseset(limitedrevs)
2588
2588
2589 return revs, expr, filematcher
2589 return revs, expr, filematcher
2590
2590
2591 def _graphnodeformatter(ui, displayer):
2591 def _graphnodeformatter(ui, displayer):
2592 spec = ui.config('ui', 'graphnodetemplate')
2592 spec = ui.config('ui', 'graphnodetemplate')
2593 if not spec:
2593 if not spec:
2594 return templatekw.showgraphnode # fast path for "{graphnode}"
2594 return templatekw.showgraphnode # fast path for "{graphnode}"
2595
2595
2596 spec = templater.unquotestring(spec)
2596 spec = templater.unquotestring(spec)
2597 templ = formatter.maketemplater(ui, spec)
2597 templ = formatter.maketemplater(ui, spec)
2598 cache = {}
2598 cache = {}
2599 if isinstance(displayer, changeset_templater):
2599 if isinstance(displayer, changeset_templater):
2600 cache = displayer.cache # reuse cache of slow templates
2600 cache = displayer.cache # reuse cache of slow templates
2601 props = templatekw.keywords.copy()
2601 props = templatekw.keywords.copy()
2602 props['templ'] = templ
2602 props['templ'] = templ
2603 props['cache'] = cache
2603 props['cache'] = cache
2604 def formatnode(repo, ctx):
2604 def formatnode(repo, ctx):
2605 props['ctx'] = ctx
2605 props['ctx'] = ctx
2606 props['repo'] = repo
2606 props['repo'] = repo
2607 props['ui'] = repo.ui
2607 props['ui'] = repo.ui
2608 props['revcache'] = {}
2608 props['revcache'] = {}
2609 return templ.render(props)
2609 return templ.render(props)
2610 return formatnode
2610 return formatnode
2611
2611
2612 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
2612 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
2613 filematcher=None, props=None):
2613 filematcher=None, props=None):
2614 props = props or {}
2614 props = props or {}
2615 formatnode = _graphnodeformatter(ui, displayer)
2615 formatnode = _graphnodeformatter(ui, displayer)
2616 state = graphmod.asciistate()
2616 state = graphmod.asciistate()
2617 styles = state['styles']
2617 styles = state['styles']
2618
2618
2619 # only set graph styling if HGPLAIN is not set.
2619 # only set graph styling if HGPLAIN is not set.
2620 if ui.plain('graph'):
2620 if ui.plain('graph'):
2621 # set all edge styles to |, the default pre-3.8 behaviour
2621 # set all edge styles to |, the default pre-3.8 behaviour
2622 styles.update(dict.fromkeys(styles, '|'))
2622 styles.update(dict.fromkeys(styles, '|'))
2623 else:
2623 else:
2624 edgetypes = {
2624 edgetypes = {
2625 'parent': graphmod.PARENT,
2625 'parent': graphmod.PARENT,
2626 'grandparent': graphmod.GRANDPARENT,
2626 'grandparent': graphmod.GRANDPARENT,
2627 'missing': graphmod.MISSINGPARENT
2627 'missing': graphmod.MISSINGPARENT
2628 }
2628 }
2629 for name, key in edgetypes.items():
2629 for name, key in edgetypes.items():
2630 # experimental config: experimental.graphstyle.*
2630 # experimental config: experimental.graphstyle.*
2631 styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
2631 styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
2632 styles[key])
2632 styles[key])
2633 if not styles[key]:
2633 if not styles[key]:
2634 styles[key] = None
2634 styles[key] = None
2635
2635
2636 # experimental config: experimental.graphshorten
2636 # experimental config: experimental.graphshorten
2637 state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
2637 state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
2638
2638
2639 for rev, type, ctx, parents in dag:
2639 for rev, type, ctx, parents in dag:
2640 char = formatnode(repo, ctx)
2640 char = formatnode(repo, ctx)
2641 copies = None
2641 copies = None
2642 if getrenamed and ctx.rev():
2642 if getrenamed and ctx.rev():
2643 copies = []
2643 copies = []
2644 for fn in ctx.files():
2644 for fn in ctx.files():
2645 rename = getrenamed(fn, ctx.rev())
2645 rename = getrenamed(fn, ctx.rev())
2646 if rename:
2646 if rename:
2647 copies.append((fn, rename[0]))
2647 copies.append((fn, rename[0]))
2648 revmatchfn = None
2648 revmatchfn = None
2649 if filematcher is not None:
2649 if filematcher is not None:
2650 revmatchfn = filematcher(ctx.rev())
2650 revmatchfn = filematcher(ctx.rev())
2651 edges = edgefn(type, char, state, rev, parents)
2651 edges = edgefn(type, char, state, rev, parents)
2652 firstedge = next(edges)
2652 firstedge = next(edges)
2653 width = firstedge[2]
2653 width = firstedge[2]
2654 displayer.show(ctx, copies=copies, matchfn=revmatchfn,
2654 displayer.show(ctx, copies=copies, matchfn=revmatchfn,
2655 _graphwidth=width, **props)
2655 _graphwidth=width, **props)
2656 lines = displayer.hunk.pop(rev).split('\n')
2656 lines = displayer.hunk.pop(rev).split('\n')
2657 if not lines[-1]:
2657 if not lines[-1]:
2658 del lines[-1]
2658 del lines[-1]
2659 displayer.flush(ctx)
2659 displayer.flush(ctx)
2660 for type, char, width, coldata in itertools.chain([firstedge], edges):
2660 for type, char, width, coldata in itertools.chain([firstedge], edges):
2661 graphmod.ascii(ui, state, type, char, lines, coldata)
2661 graphmod.ascii(ui, state, type, char, lines, coldata)
2662 lines = []
2662 lines = []
2663 displayer.close()
2663 displayer.close()
2664
2664
2665 def graphlog(ui, repo, pats, opts):
2665 def graphlog(ui, repo, pats, opts):
2666 # Parameters are identical to log command ones
2666 # Parameters are identical to log command ones
2667 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2667 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2668 revdag = graphmod.dagwalker(repo, revs)
2668 revdag = graphmod.dagwalker(repo, revs)
2669
2669
2670 getrenamed = None
2670 getrenamed = None
2671 if opts.get('copies'):
2671 if opts.get('copies'):
2672 endrev = None
2672 endrev = None
2673 if opts.get('rev'):
2673 if opts.get('rev'):
2674 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2674 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2675 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2675 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2676
2676
2677 ui.pager('log')
2677 ui.pager('log')
2678 displayer = show_changeset(ui, repo, opts, buffered=True)
2678 displayer = show_changeset(ui, repo, opts, buffered=True)
2679 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
2679 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
2680 filematcher)
2680 filematcher)
2681
2681
2682 def checkunsupportedgraphflags(pats, opts):
2682 def checkunsupportedgraphflags(pats, opts):
2683 for op in ["newest_first"]:
2683 for op in ["newest_first"]:
2684 if op in opts and opts[op]:
2684 if op in opts and opts[op]:
2685 raise error.Abort(_("-G/--graph option is incompatible with --%s")
2685 raise error.Abort(_("-G/--graph option is incompatible with --%s")
2686 % op.replace("_", "-"))
2686 % op.replace("_", "-"))
2687
2687
2688 def graphrevs(repo, nodes, opts):
2688 def graphrevs(repo, nodes, opts):
2689 limit = loglimit(opts)
2689 limit = loglimit(opts)
2690 nodes.reverse()
2690 nodes.reverse()
2691 if limit is not None:
2691 if limit is not None:
2692 nodes = nodes[:limit]
2692 nodes = nodes[:limit]
2693 return graphmod.nodes(repo, nodes)
2693 return graphmod.nodes(repo, nodes)
2694
2694
2695 def add(ui, repo, match, prefix, explicitonly, **opts):
2695 def add(ui, repo, match, prefix, explicitonly, **opts):
2696 join = lambda f: os.path.join(prefix, f)
2696 join = lambda f: os.path.join(prefix, f)
2697 bad = []
2697 bad = []
2698
2698
2699 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2699 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2700 names = []
2700 names = []
2701 wctx = repo[None]
2701 wctx = repo[None]
2702 cca = None
2702 cca = None
2703 abort, warn = scmutil.checkportabilityalert(ui)
2703 abort, warn = scmutil.checkportabilityalert(ui)
2704 if abort or warn:
2704 if abort or warn:
2705 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2705 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2706
2706
2707 badmatch = matchmod.badmatch(match, badfn)
2707 badmatch = matchmod.badmatch(match, badfn)
2708 dirstate = repo.dirstate
2708 dirstate = repo.dirstate
2709 # We don't want to just call wctx.walk here, since it would return a lot of
2709 # We don't want to just call wctx.walk here, since it would return a lot of
2710 # clean files, which we aren't interested in and takes time.
2710 # clean files, which we aren't interested in and takes time.
2711 for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
2711 for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
2712 True, False, full=False)):
2712 unknown=True, ignored=False, full=False)):
2713 exact = match.exact(f)
2713 exact = match.exact(f)
2714 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2714 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2715 if cca:
2715 if cca:
2716 cca(f)
2716 cca(f)
2717 names.append(f)
2717 names.append(f)
2718 if ui.verbose or not exact:
2718 if ui.verbose or not exact:
2719 ui.status(_('adding %s\n') % match.rel(f))
2719 ui.status(_('adding %s\n') % match.rel(f))
2720
2720
2721 for subpath in sorted(wctx.substate):
2721 for subpath in sorted(wctx.substate):
2722 sub = wctx.sub(subpath)
2722 sub = wctx.sub(subpath)
2723 try:
2723 try:
2724 submatch = matchmod.subdirmatcher(subpath, match)
2724 submatch = matchmod.subdirmatcher(subpath, match)
2725 if opts.get(r'subrepos'):
2725 if opts.get(r'subrepos'):
2726 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2726 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2727 else:
2727 else:
2728 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2728 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2729 except error.LookupError:
2729 except error.LookupError:
2730 ui.status(_("skipping missing subrepository: %s\n")
2730 ui.status(_("skipping missing subrepository: %s\n")
2731 % join(subpath))
2731 % join(subpath))
2732
2732
2733 if not opts.get(r'dry_run'):
2733 if not opts.get(r'dry_run'):
2734 rejected = wctx.add(names, prefix)
2734 rejected = wctx.add(names, prefix)
2735 bad.extend(f for f in rejected if f in match.files())
2735 bad.extend(f for f in rejected if f in match.files())
2736 return bad
2736 return bad
2737
2737
2738 def addwebdirpath(repo, serverpath, webconf):
2738 def addwebdirpath(repo, serverpath, webconf):
2739 webconf[serverpath] = repo.root
2739 webconf[serverpath] = repo.root
2740 repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))
2740 repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))
2741
2741
2742 for r in repo.revs('filelog("path:.hgsub")'):
2742 for r in repo.revs('filelog("path:.hgsub")'):
2743 ctx = repo[r]
2743 ctx = repo[r]
2744 for subpath in ctx.substate:
2744 for subpath in ctx.substate:
2745 ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2745 ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2746
2746
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by ``match``, recursing into subrepos.

    Returns a pair ``(bad, forgot)``: files that could not be forgotten
    and files that actually were forgotten.
    """
    def join(f):
        return os.path.join(prefix, f)

    bad = []

    def badfn(x, y):
        # Record the bad file ourselves, then delegate to the matcher's
        # own bad-file callback (return value mirrors the original lambda).
        bad.append(x)
        return match.bad(x, y)

    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        # Only forget files that were named literally on the command line.
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2794
2794
def files(ui, ctx, m, fm, fmt, subrepos):
    """List files in ctx matched by m through formatter fm.

    Returns 0 when at least one file was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # In the working copy (rev is None), skip files marked as removed.
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2824
2824
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Implement the working-copy side of 'hg remove'.

    after:    only mark already-deleted files as removed
    force:    remove even modified/added files
    subrepos: recurse into all subrepositories
    warnings: optional list collecting warning strings; when None, warnings
              are printed here, otherwise the caller owns (and prints) them.

    Returns 0 on success, 1 if anything was skipped or warned about.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            # True when f names a path inside some subrepo's tree.
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    # Select which tracked files to forget/unlink.  (Renamed from 'list'
    # to avoid shadowing the builtin.)
    if force:
        toremove = modified + deleted + clean + added
    elif after:
        toremove = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        toremove = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    toremove = sorted(toremove)
    total = len(toremove)
    count = 0
    for f in toremove:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in toremove:
                if f in added:
                    continue # we never unlink added files on remove
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(toremove)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2942
2942
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write out the contents of files in ctx matched by matcher.

    Output goes through formatter basefm; fntemplate, when set, routes each
    file to its own destination file.  Returns 0 when at least one file was
    written, 1 otherwise.
    """
    err = 1

    def write(path):
        # Emit one file, optionally into a per-file destination derived
        # from fntemplate.
        filename = None
        if fntemplate:
            filename = makefilename(repo, fntemplate, ctx.node(),
                                    pathname=os.path.join(prefix, path))
        with formatter.maybereopen(basefm, filename, opts) as fm:
            data = ctx[path].data()
            if opts.get('decode'):
                data = repo.wwritedata(path, data)
            fm.startitem()
            fm.write('data', '%s', data)
            fm.data(abspath=path, path=matcher.rel(path))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        # Renamed from 'file' to avoid shadowing the (py2) builtin.
        fname = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(fname)[0]:
                write(fname)
                return 0
        except KeyError:
            pass

    # Renamed from 'abs' to avoid shadowing the builtin.
    for abspath in ctx.walk(matcher):
        write(abspath)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, basefm, fntemplate,
                           os.path.join(prefix, sub._path), **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2989
2989
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    dsguard = None
    if opts.get('addremove'):
        dsguard = dirstateguard.dirstateguard(repo, 'commit')
    with dsguard or util.nullcontextmanager():
        if dsguard:
            # Stage new/missing files; abort the whole commit if that fails.
            if scmutil.addremove(repo, matcher, "", opts) != 0:
                raise error.Abort(
                    _("failed to mark all new/missing files as added/removed"))

        return commitfunc(ui, repo, message, matcher, opts)
3010
3010
def samefile(f, ctx1, ctx2):
    """True if file f is identical (content and flags) in ctx1 and ctx2.

    A file absent from both contexts also counts as "the same".
    """
    if f not in ctx1.manifest():
        # Missing from ctx1: same only when also missing from ctx2.
        return f not in ctx2.manifest()
    if f not in ctx2.manifest():
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
3022
3022
def amend(ui, repo, old, extra, pats, opts):
    """Rewrite changeset `old` with changes from the working copy.

    Files matched by pats/opts are folded into the amended commit; all
    other files keep their content from `old`.  Returns the node of the
    new changeset (or old.node() when nothing changed).
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction('amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        user = opts.get('user') or old.user()
        date = opts.get('date') or old.date()

        # Parse the date to allow comparison between date and old.date()
        date = util.parsedate(date)

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            files = set([fn for st in repo.status(base, old)[:3]
                         for fn in st])
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        if (opts.get('addremove')
            and scmutil.addremove(repo, matcher, "", opts)):
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))

        filestoamend = set(f for f in wctx.files() if matcher(f))

        changes = (len(filestoamend) > 0)
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # BUGFIX: was 'if old.p2:' — a bound method is always truthy,
            # so copies were recomputed against the null second parent even
            # for non-merge amends.  Call it so only real merges recurse.
            if old.p2():
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was deleted, it's no longer relevant
            files.update(filestoamend)
            files = [f for f in files if not samefile(f, wctx, base)]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    fctx = wctx[path]

                    # Return None for removed files.
                    if not fctx.exists():
                        return None

                    flags = fctx.flags()
                    mctx = context.memfilectx(repo,
                                              fctx.path(), fctx.data(),
                                              islink='l' in flags,
                                              isexec='x' in flags,
                                              copied=copied.get(path))
                    return mctx
                except KeyError:
                    return None
        else:
            ui.note(_('copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, 'commit.amend')
        editor = getcommiteditor(editform=editform,
                                 **pycompat.strkwargs(opts))

        if not message:
            editor = getcommiteditor(edit=True, editform=editform)
            message = old.description()

        pureextra = extra.copy()
        extra['amend_source'] = old.hex()

        new = context.memctx(repo,
                             parents=[base.node(), old.p2().node()],
                             text=message,
                             files=files,
                             filectxfn=filectxfn,
                             user=user,
                             date=date,
                             extra=extra,
                             editor=editor)

        newdesc = changelog.stripdesc(new.description())
        if ((not changes)
            and newdesc == old.description()
            and user == old.user()
            and date == old.date()
            and pureextra == old.extra()):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This not what we expect from amend.
            return old.node()

        if opts.get('secret'):
            commitphase = 'secret'
        else:
            commitphase = old.phase()
        overrides = {('phases', 'new-commit'): commitphase}
        with ui.configoverride(overrides, 'amend'):
            newid = repo.commitctx(new)

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)
        mapping = {old.node(): (newid,)}
        scmutil.cleanupnodes(repo, mapping, 'amend')

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and
        # modified in the amend to "normal" in the dirstate.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normal(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        return newid
3197
3197
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's commit message, launching the editor only when empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
3203
3203
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Unconditionally invoke the user's editor for a commit message.

    Raises error.Abort on an empty message, or — when
    unchangedmessagedetection is set — when the user left the generated
    template untouched.  Returns the cleaned-up message text.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # Look up the most specific [committemplate] entry for the edit form,
    # dropping components from the right (e.g. 'changeset.commit.amend'
    # -> 'changeset.commit' -> 'changeset').
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        ref = '.'.join(forms)
        if repo.ui.config('committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = pycompat.getcwd()
    os.chdir(repo.root)
    try:
        # make in-memory changes visible to external process
        tr = repo.currenttransaction()
        repo.dirstate.write(tr)
        pending = tr and tr.writepending() and repo.root

        editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                                  editform=editform, pending=pending,
                                  repopath=repo.path, action='commit')
    finally:
        # BUGFIX: restore the cwd even when the editor aborts/raises;
        # previously an exception from ui.edit leaked the chdir.
        os.chdir(olddir)
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    text = re.sub("(?m)^HG:.*(\n|$)", "", text)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
3253
3253
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the commit-message editor template ``ref`` for ``ctx``.

    Template definitions from the ``[committemplate]`` config section are
    loaded into the templater cache before rendering.  Returns the rendered
    text as a string.
    """
    ui = repo.ui
    spec = formatter.templatespec(ref, None, None)
    tmpl = changeset_templater(ui, repo, spec, None, {}, False)
    # user-configured template snippets override/extend the built-ins
    overrides = ((k, templater.unquotestring(v))
                 for k, v in ui.configitems('committemplate'))
    tmpl.t.cache.update(overrides)

    ui.pushbuffer()
    # normalize a falsy extramsg to an empty string before rendering
    tmpl.show(ctx, extramsg=extramsg or '')
    return ui.popbuffer()
3267
3267
def hgprefix(msg):
    """Prefix every non-empty line of ``msg`` with ``HG: `` and rejoin.

    Empty lines are dropped entirely (they would otherwise become bare
    ``HG:`` markers in the editor template).
    """
    nonempty = (line for line in msg.split("\n") if line)
    return "\n".join("HG: %s" % line for line in nonempty)
3270
3270
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the plain-text commit-editor template for ``ctx``.

    The result starts with the existing description (if any), followed by
    ``HG:``-prefixed helper lines describing the user, branch, bookmark,
    subrepos, and the lists of added/changed/removed files.
    """
    lines = []
    emit = lines.append

    if ctx.description():
        emit(ctx.description())
    emit("")
    emit("")  # Empty line between message and comments.
    emit(hgprefix(_("Enter commit message."
                    " Lines beginning with 'HG:' are removed.")))
    emit(hgprefix(extramsg))
    emit("HG: --")
    emit(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        emit(hgprefix(_("branch merge")))
    if ctx.branch():
        emit(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        emit(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    for s in subs:
        emit(hgprefix(_("subrepo %s") % s))

    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    for f in added:
        emit(hgprefix(_("added %s") % f))
    for f in modified:
        emit(hgprefix(_("changed %s") % f))
    for f in removed:
        emit(hgprefix(_("removed %s") % f))
    if not (added or modified or removed):
        emit(hgprefix(_("no files changed")))
    emit("")

    return "\n".join(lines)
3298
3298
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Report the outcome of a freshly created commit to the user.

    Prints "created new head" when the new changeset adds a topological or
    branch head, warns when a closed branch head is being reopened, and
    echoes the committed changeset id in verbose/debug mode.

    ``bheads`` is the list of branch-head nodes before the commit (may be
    None/empty, in which case no head message is ever printed).
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if not opts.get('amend') and bheads and node not in bheads:
        # The message is printed unless one of the parents was a head of
        # the same branch: in that case the commit merely advanced an
        # existing head.  Parents that are null, on another named branch,
        # or regular non-head changesets all mean an additional head was
        # created (extra topo root, branch root, topo head, or head from a
        # merge).  Head-to-head merges decrease the head count and are
        # excluded by the same test.
        parentwashead = any(p.node() in bheads and p.branch() == branch
                            for p in parents)
        if not parentwashead:
            repo.ui.status(_('created new head\n'))

    if not opts.get('close_branch'):
        for p in parents:
            if p.closesbranch() and p.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % p)

    ui = repo.ui
    if ui.debugflag:
        ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif ui.verbose:
        ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
3346
3346
def postcommitstatus(repo, pats, opts):
    """Return working-directory status for the files matched by pats/opts.

    The match is evaluated against the workingctx so filesets behave
    consistently with the commit that just happened.
    """
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
3349
3349
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Revert matched files in the working directory to their state in ctx.

    ``parents`` is the (p1, p2) node pair of the working directory; ``pats``
    and ``opts`` select the files to act on.  Every matched file is
    classified by comparing three states (target revision, dirstate parent,
    working directory), then dispatched to one of the revert actions
    (revert/add/remove/drop/forget/undelete/noop/unknown), optionally
    creating ``.orig`` backups.  The actual filesystem/dirstate changes are
    delegated to _performrevert.
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        # reverting against the second parent: treat p2 as "the" parent so
        # the dirstate comparison below is done against the right revision
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress "not found" noise for paths already known from the
                # working copy, subrepos, or directories covered by `names`
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            # match-all fast path: a plain status walk already visits every
            # relevant file, so reuse its results to populate `names`
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            # reverting to the dirstate parent: the status computed above is
            # already relative to the parent, so all changes are "dirstate"
            # changes
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            # reverting to another revision: compute a second status against
            # the dirstate parent to separate local (ds*) changes from
            # changes between the parent and the target revision
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                   }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3 # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1   # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions['revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions['revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions['revert'], dsmodifiedbackup),
            # Added since target
            (added, actions['remove'], discard),
            # Added in working directory
            (dsadded, actions['forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions['drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions['add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions['add'], check),
            # Removed since targe, marked as such in working copy parent
            (dsremoved, actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions['noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions['unknown'], discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                            bakname = scmutil.origpath(ui, repo, rel)
                            ui.note(_('saving current version of %s as %s\n') %
                                    (rel, bakname))
                            if not opts.get('dry_run'):
                                if interactive:
                                    util.copyfile(target, bakname)
                                else:
                                    util.rename(target, bakname)
                    if ui.verbose or not exact:
                        if not isinstance(msg, basestring):
                            msg = msg(abs)
                        ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive, tobackup)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
3632
3632
def _revertprefetch(repo, ctx, *files):
    """Hook point: extensions that change the storage layer may override
    this to prefetch file contents before revert touches them.

    The default implementation does nothing.
    """
3636
3636
3637 def _performrevert(repo, parents, ctx, actions, interactive=False,
3637 def _performrevert(repo, parents, ctx, actions, interactive=False,
3638 tobackup=None):
3638 tobackup=None):
3639 """function that actually perform all the actions computed for revert
3639 """function that actually perform all the actions computed for revert
3640
3640
3641 This is an independent function to let extension to plug in and react to
3641 This is an independent function to let extension to plug in and react to
3642 the imminent revert.
3642 the imminent revert.
3643
3643
3644 Make sure you have the working directory locked when calling this function.
3644 Make sure you have the working directory locked when calling this function.
3645 """
3645 """
3646 parent, p2 = parents
3646 parent, p2 = parents
3647 node = ctx.node()
3647 node = ctx.node()
3648 excluded_files = []
3648 excluded_files = []
3649 matcher_opts = {"exclude": excluded_files}
3649 matcher_opts = {"exclude": excluded_files}
3650
3650
3651 def checkout(f):
3651 def checkout(f):
3652 fc = ctx[f]
3652 fc = ctx[f]
3653 repo.wwrite(f, fc.data(), fc.flags())
3653 repo.wwrite(f, fc.data(), fc.flags())
3654
3654
3655 def doremove(f):
3655 def doremove(f):
3656 try:
3656 try:
3657 repo.wvfs.unlinkpath(f)
3657 repo.wvfs.unlinkpath(f)
3658 except OSError:
3658 except OSError:
3659 pass
3659 pass
3660 repo.dirstate.remove(f)
3660 repo.dirstate.remove(f)
3661
3661
3662 audit_path = pathutil.pathauditor(repo.root, cached=True)
3662 audit_path = pathutil.pathauditor(repo.root, cached=True)
3663 for f in actions['forget'][0]:
3663 for f in actions['forget'][0]:
3664 if interactive:
3664 if interactive:
3665 choice = repo.ui.promptchoice(
3665 choice = repo.ui.promptchoice(
3666 _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
3666 _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
3667 if choice == 0:
3667 if choice == 0:
3668 repo.dirstate.drop(f)
3668 repo.dirstate.drop(f)
3669 else:
3669 else:
3670 excluded_files.append(repo.wjoin(f))
3670 excluded_files.append(repo.wjoin(f))
3671 else:
3671 else:
3672 repo.dirstate.drop(f)
3672 repo.dirstate.drop(f)
3673 for f in actions['remove'][0]:
3673 for f in actions['remove'][0]:
3674 audit_path(f)
3674 audit_path(f)
3675 if interactive:
3675 if interactive:
3676 choice = repo.ui.promptchoice(
3676 choice = repo.ui.promptchoice(
3677 _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
3677 _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
3678 if choice == 0:
3678 if choice == 0:
3679 doremove(f)
3679 doremove(f)
3680 else:
3680 else:
3681 excluded_files.append(repo.wjoin(f))
3681 excluded_files.append(repo.wjoin(f))
3682 else:
3682 else:
3683 doremove(f)
3683 doremove(f)
3684 for f in actions['drop'][0]:
3684 for f in actions['drop'][0]:
3685 audit_path(f)
3685 audit_path(f)
3686 repo.dirstate.remove(f)
3686 repo.dirstate.remove(f)
3687
3687
3688 normal = None
3688 normal = None
3689 if node == parent:
3689 if node == parent:
3690 # We're reverting to our parent. If possible, we'd like status
3690 # We're reverting to our parent. If possible, we'd like status
3691 # to report the file as clean. We have to use normallookup for
3691 # to report the file as clean. We have to use normallookup for
3692 # merges to avoid losing information about merged/dirty files.
3692 # merges to avoid losing information about merged/dirty files.
3693 if p2 != nullid:
3693 if p2 != nullid:
3694 normal = repo.dirstate.normallookup
3694 normal = repo.dirstate.normallookup
3695 else:
3695 else:
3696 normal = repo.dirstate.normal
3696 normal = repo.dirstate.normal
3697
3697
3698 newlyaddedandmodifiedfiles = set()
3698 newlyaddedandmodifiedfiles = set()
3699 if interactive:
3699 if interactive:
3700 # Prompt the user for changes to revert
3700 # Prompt the user for changes to revert
3701 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3701 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3702 m = scmutil.match(ctx, torevert, matcher_opts)
3702 m = scmutil.match(ctx, torevert, matcher_opts)
3703 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3703 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3704 diffopts.nodates = True
3704 diffopts.nodates = True
3705 diffopts.git = True
3705 diffopts.git = True
3706 operation = 'discard'
3706 operation = 'discard'
3707 reversehunks = True
3707 reversehunks = True
3708 if node != parent:
3708 if node != parent:
3709 operation = 'revert'
3709 operation = 'revert'
3710 reversehunks = repo.ui.configbool('experimental',
3710 reversehunks = repo.ui.configbool('experimental',
3711 'revertalternateinteractivemode')
3711 'revertalternateinteractivemode')
3712 if reversehunks:
3712 if reversehunks:
3713 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3713 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3714 else:
3714 else:
3715 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3715 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3716 originalchunks = patch.parsepatch(diff)
3716 originalchunks = patch.parsepatch(diff)
3717
3717
3718 try:
3718 try:
3719
3719
3720 chunks, opts = recordfilter(repo.ui, originalchunks,
3720 chunks, opts = recordfilter(repo.ui, originalchunks,
3721 operation=operation)
3721 operation=operation)
3722 if reversehunks:
3722 if reversehunks:
3723 chunks = patch.reversehunks(chunks)
3723 chunks = patch.reversehunks(chunks)
3724
3724
3725 except error.PatchError as err:
3725 except error.PatchError as err:
3726 raise error.Abort(_('error parsing patch: %s') % err)
3726 raise error.Abort(_('error parsing patch: %s') % err)
3727
3727
3728 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3728 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3729 if tobackup is None:
3729 if tobackup is None:
3730 tobackup = set()
3730 tobackup = set()
3731 # Apply changes
3731 # Apply changes
3732 fp = stringio()
3732 fp = stringio()
3733 for c in chunks:
3733 for c in chunks:
3734 # Create a backup file only if this hunk should be backed up
3734 # Create a backup file only if this hunk should be backed up
3735 if ishunk(c) and c.header.filename() in tobackup:
3735 if ishunk(c) and c.header.filename() in tobackup:
3736 abs = c.header.filename()
3736 abs = c.header.filename()
3737 target = repo.wjoin(abs)
3737 target = repo.wjoin(abs)
3738 bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
3738 bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
3739 util.copyfile(target, bakname)
3739 util.copyfile(target, bakname)
3740 tobackup.remove(abs)
3740 tobackup.remove(abs)
3741 c.write(fp)
3741 c.write(fp)
3742 dopatch = fp.tell()
3742 dopatch = fp.tell()
3743 fp.seek(0)
3743 fp.seek(0)
3744 if dopatch:
3744 if dopatch:
3745 try:
3745 try:
3746 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3746 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3747 except error.PatchError as err:
3747 except error.PatchError as err:
3748 raise error.Abort(str(err))
3748 raise error.Abort(str(err))
3749 del fp
3749 del fp
3750 else:
3750 else:
3751 for f in actions['revert'][0]:
3751 for f in actions['revert'][0]:
3752 checkout(f)
3752 checkout(f)
3753 if normal:
3753 if normal:
3754 normal(f)
3754 normal(f)
3755
3755
3756 for f in actions['add'][0]:
3756 for f in actions['add'][0]:
3757 # Don't checkout modified files, they are already created by the diff
3757 # Don't checkout modified files, they are already created by the diff
3758 if f not in newlyaddedandmodifiedfiles:
3758 if f not in newlyaddedandmodifiedfiles:
3759 checkout(f)
3759 checkout(f)
3760 repo.dirstate.add(f)
3760 repo.dirstate.add(f)
3761
3761
3762 normal = repo.dirstate.normallookup
3762 normal = repo.dirstate.normallookup
3763 if node == parent and p2 == nullid:
3763 if node == parent and p2 == nullid:
3764 normal = repo.dirstate.normal
3764 normal = repo.dirstate.normal
3765 for f in actions['undelete'][0]:
3765 for f in actions['undelete'][0]:
3766 checkout(f)
3766 checkout(f)
3767 normal(f)
3767 normal(f)
3768
3768
3769 copied = copies.pathcopies(repo[parent], ctx)
3769 copied = copies.pathcopies(repo[parent], ctx)
3770
3770
3771 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3771 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3772 if f in copied:
3772 if f in copied:
3773 repo.dirstate.copy(copied[f], f)
3773 repo.dirstate.copy(copied[f], f)
3774
3774
3775 class command(registrar.command):
3775 class command(registrar.command):
3776 def _doregister(self, func, name, *args, **kwargs):
3776 def _doregister(self, func, name, *args, **kwargs):
3777 func._deprecatedregistrar = True # flag for deprecwarn in extensions.py
3777 func._deprecatedregistrar = True # flag for deprecwarn in extensions.py
3778 return super(command, self)._doregister(func, name, *args, **kwargs)
3778 return super(command, self)._doregister(func, name, *args, **kwargs)
3779
3779
3780 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3780 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3781 # commands.outgoing. "missing" is "missing" of the result of
3781 # commands.outgoing. "missing" is "missing" of the result of
3782 # "findcommonoutgoing()"
3782 # "findcommonoutgoing()"
3783 outgoinghooks = util.hooks()
3783 outgoinghooks = util.hooks()
3784
3784
3785 # a list of (ui, repo) functions called by commands.summary
3785 # a list of (ui, repo) functions called by commands.summary
3786 summaryhooks = util.hooks()
3786 summaryhooks = util.hooks()
3787
3787
3788 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3788 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3789 #
3789 #
3790 # functions should return tuple of booleans below, if 'changes' is None:
3790 # functions should return tuple of booleans below, if 'changes' is None:
3791 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3791 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3792 #
3792 #
3793 # otherwise, 'changes' is a tuple of tuples below:
3793 # otherwise, 'changes' is a tuple of tuples below:
3794 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3794 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3795 # - (desturl, destbranch, destpeer, outgoing)
3795 # - (desturl, destbranch, destpeer, outgoing)
3796 summaryremotehooks = util.hooks()
3796 summaryremotehooks = util.hooks()
3797
3797
3798 # A list of state files kept by multistep operations like graft.
3798 # A list of state files kept by multistep operations like graft.
3799 # Since graft cannot be aborted, it is considered 'clearable' by update.
3799 # Since graft cannot be aborted, it is considered 'clearable' by update.
3800 # note: bisect is intentionally excluded
3800 # note: bisect is intentionally excluded
3801 # (state file, clearable, allowcommit, error, hint)
3801 # (state file, clearable, allowcommit, error, hint)
3802 unfinishedstates = [
3802 unfinishedstates = [
3803 ('graftstate', True, False, _('graft in progress'),
3803 ('graftstate', True, False, _('graft in progress'),
3804 _("use 'hg graft --continue' or 'hg update' to abort")),
3804 _("use 'hg graft --continue' or 'hg update' to abort")),
3805 ('updatestate', True, False, _('last update was interrupted'),
3805 ('updatestate', True, False, _('last update was interrupted'),
3806 _("use 'hg update' to get a consistent checkout"))
3806 _("use 'hg update' to get a consistent checkout"))
3807 ]
3807 ]
3808
3808
3809 def checkunfinished(repo, commit=False):
3809 def checkunfinished(repo, commit=False):
3810 '''Look for an unfinished multistep operation, like graft, and abort
3810 '''Look for an unfinished multistep operation, like graft, and abort
3811 if found. It's probably good to check this right before
3811 if found. It's probably good to check this right before
3812 bailifchanged().
3812 bailifchanged().
3813 '''
3813 '''
3814 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3814 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3815 if commit and allowcommit:
3815 if commit and allowcommit:
3816 continue
3816 continue
3817 if repo.vfs.exists(f):
3817 if repo.vfs.exists(f):
3818 raise error.Abort(msg, hint=hint)
3818 raise error.Abort(msg, hint=hint)
3819
3819
3820 def clearunfinished(repo):
3820 def clearunfinished(repo):
3821 '''Check for unfinished operations (as above), and clear the ones
3821 '''Check for unfinished operations (as above), and clear the ones
3822 that are clearable.
3822 that are clearable.
3823 '''
3823 '''
3824 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3824 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3825 if not clearable and repo.vfs.exists(f):
3825 if not clearable and repo.vfs.exists(f):
3826 raise error.Abort(msg, hint=hint)
3826 raise error.Abort(msg, hint=hint)
3827 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3827 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3828 if clearable and repo.vfs.exists(f):
3828 if clearable and repo.vfs.exists(f):
3829 util.unlink(repo.vfs.join(f))
3829 util.unlink(repo.vfs.join(f))
3830
3830
3831 afterresolvedstates = [
3831 afterresolvedstates = [
3832 ('graftstate',
3832 ('graftstate',
3833 _('hg graft --continue')),
3833 _('hg graft --continue')),
3834 ]
3834 ]
3835
3835
3836 def howtocontinue(repo):
3836 def howtocontinue(repo):
3837 '''Check for an unfinished operation and return the command to finish
3837 '''Check for an unfinished operation and return the command to finish
3838 it.
3838 it.
3839
3839
3840 afterresolvedstates tuples define a .hg/{file} and the corresponding
3840 afterresolvedstates tuples define a .hg/{file} and the corresponding
3841 command needed to finish it.
3841 command needed to finish it.
3842
3842
3843 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3843 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3844 a boolean.
3844 a boolean.
3845 '''
3845 '''
3846 contmsg = _("continue: %s")
3846 contmsg = _("continue: %s")
3847 for f, msg in afterresolvedstates:
3847 for f, msg in afterresolvedstates:
3848 if repo.vfs.exists(f):
3848 if repo.vfs.exists(f):
3849 return contmsg % msg, True
3849 return contmsg % msg, True
3850 if repo[None].dirty(missing=True, merge=False, branch=False):
3850 if repo[None].dirty(missing=True, merge=False, branch=False):
3851 return contmsg % _("hg commit"), False
3851 return contmsg % _("hg commit"), False
3852 return None, None
3852 return None, None
3853
3853
3854 def checkafterresolved(repo):
3854 def checkafterresolved(repo):
3855 '''Inform the user about the next action after completing hg resolve
3855 '''Inform the user about the next action after completing hg resolve
3856
3856
3857 If there's a matching afterresolvedstates, howtocontinue will yield
3857 If there's a matching afterresolvedstates, howtocontinue will yield
3858 repo.ui.warn as the reporter.
3858 repo.ui.warn as the reporter.
3859
3859
3860 Otherwise, it will yield repo.ui.note.
3860 Otherwise, it will yield repo.ui.note.
3861 '''
3861 '''
3862 msg, warning = howtocontinue(repo)
3862 msg, warning = howtocontinue(repo)
3863 if msg is not None:
3863 if msg is not None:
3864 if warning:
3864 if warning:
3865 repo.ui.warn("%s\n" % msg)
3865 repo.ui.warn("%s\n" % msg)
3866 else:
3866 else:
3867 repo.ui.note("%s\n" % msg)
3867 repo.ui.note("%s\n" % msg)
3868
3868
3869 def wrongtooltocontinue(repo, task):
3869 def wrongtooltocontinue(repo, task):
3870 '''Raise an abort suggesting how to properly continue if there is an
3870 '''Raise an abort suggesting how to properly continue if there is an
3871 active task.
3871 active task.
3872
3872
3873 Uses howtocontinue() to find the active task.
3873 Uses howtocontinue() to find the active task.
3874
3874
3875 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3875 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3876 a hint.
3876 a hint.
3877 '''
3877 '''
3878 after = howtocontinue(repo)
3878 after = howtocontinue(repo)
3879 hint = None
3879 hint = None
3880 if after[1]:
3880 if after[1]:
3881 hint = after[0]
3881 hint = after[0]
3882 raise error.Abort(_('no %s in progress') % task, hint=hint)
3882 raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,2606 +1,2607 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 mdiff,
33 mdiff,
34 obsolete as obsmod,
34 obsolete as obsmod,
35 patch,
35 patch,
36 pathutil,
36 pathutil,
37 phases,
37 phases,
38 pycompat,
38 pycompat,
39 repoview,
39 repoview,
40 revlog,
40 revlog,
41 scmutil,
41 scmutil,
42 sparse,
42 sparse,
43 subrepo,
43 subrepo,
44 util,
44 util,
45 )
45 )
46
46
47 propertycache = util.propertycache
47 propertycache = util.propertycache
48
48
49 nonascii = re.compile(r'[^\x21-\x7f]').search
49 nonascii = re.compile(r'[^\x21-\x7f]').search
50
50
51 class basectx(object):
51 class basectx(object):
52 """A basectx object represents the common logic for its children:
52 """A basectx object represents the common logic for its children:
53 changectx: read-only context that is already present in the repo,
53 changectx: read-only context that is already present in the repo,
54 workingctx: a context that represents the working directory and can
54 workingctx: a context that represents the working directory and can
55 be committed,
55 be committed,
56 memctx: a context that represents changes in-memory and can also
56 memctx: a context that represents changes in-memory and can also
57 be committed."""
57 be committed."""
58 def __new__(cls, repo, changeid='', *args, **kwargs):
58 def __new__(cls, repo, changeid='', *args, **kwargs):
59 if isinstance(changeid, basectx):
59 if isinstance(changeid, basectx):
60 return changeid
60 return changeid
61
61
62 o = super(basectx, cls).__new__(cls)
62 o = super(basectx, cls).__new__(cls)
63
63
64 o._repo = repo
64 o._repo = repo
65 o._rev = nullrev
65 o._rev = nullrev
66 o._node = nullid
66 o._node = nullid
67
67
68 return o
68 return o
69
69
70 def __bytes__(self):
70 def __bytes__(self):
71 return short(self.node())
71 return short(self.node())
72
72
73 __str__ = encoding.strmethod(__bytes__)
73 __str__ = encoding.strmethod(__bytes__)
74
74
75 def __int__(self):
75 def __int__(self):
76 return self.rev()
76 return self.rev()
77
77
78 def __repr__(self):
78 def __repr__(self):
79 return r"<%s %s>" % (type(self).__name__, str(self))
79 return r"<%s %s>" % (type(self).__name__, str(self))
80
80
81 def __eq__(self, other):
81 def __eq__(self, other):
82 try:
82 try:
83 return type(self) == type(other) and self._rev == other._rev
83 return type(self) == type(other) and self._rev == other._rev
84 except AttributeError:
84 except AttributeError:
85 return False
85 return False
86
86
87 def __ne__(self, other):
87 def __ne__(self, other):
88 return not (self == other)
88 return not (self == other)
89
89
90 def __contains__(self, key):
90 def __contains__(self, key):
91 return key in self._manifest
91 return key in self._manifest
92
92
93 def __getitem__(self, key):
93 def __getitem__(self, key):
94 return self.filectx(key)
94 return self.filectx(key)
95
95
96 def __iter__(self):
96 def __iter__(self):
97 return iter(self._manifest)
97 return iter(self._manifest)
98
98
99 def _buildstatusmanifest(self, status):
99 def _buildstatusmanifest(self, status):
100 """Builds a manifest that includes the given status results, if this is
100 """Builds a manifest that includes the given status results, if this is
101 a working copy context. For non-working copy contexts, it just returns
101 a working copy context. For non-working copy contexts, it just returns
102 the normal manifest."""
102 the normal manifest."""
103 return self.manifest()
103 return self.manifest()
104
104
105 def _matchstatus(self, other, match):
105 def _matchstatus(self, other, match):
106 """This internal method provides a way for child objects to override the
106 """This internal method provides a way for child objects to override the
107 match operator.
107 match operator.
108 """
108 """
109 return match
109 return match
110
110
111 def _buildstatus(self, other, s, match, listignored, listclean,
111 def _buildstatus(self, other, s, match, listignored, listclean,
112 listunknown):
112 listunknown):
113 """build a status with respect to another context"""
113 """build a status with respect to another context"""
114 # Load earliest manifest first for caching reasons. More specifically,
114 # Load earliest manifest first for caching reasons. More specifically,
115 # if you have revisions 1000 and 1001, 1001 is probably stored as a
115 # if you have revisions 1000 and 1001, 1001 is probably stored as a
116 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
116 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
117 # 1000 and cache it so that when you read 1001, we just need to apply a
117 # 1000 and cache it so that when you read 1001, we just need to apply a
118 # delta to what's in the cache. So that's one full reconstruction + one
118 # delta to what's in the cache. So that's one full reconstruction + one
119 # delta application.
119 # delta application.
120 mf2 = None
120 mf2 = None
121 if self.rev() is not None and self.rev() < other.rev():
121 if self.rev() is not None and self.rev() < other.rev():
122 mf2 = self._buildstatusmanifest(s)
122 mf2 = self._buildstatusmanifest(s)
123 mf1 = other._buildstatusmanifest(s)
123 mf1 = other._buildstatusmanifest(s)
124 if mf2 is None:
124 if mf2 is None:
125 mf2 = self._buildstatusmanifest(s)
125 mf2 = self._buildstatusmanifest(s)
126
126
127 modified, added = [], []
127 modified, added = [], []
128 removed = []
128 removed = []
129 clean = []
129 clean = []
130 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
131 deletedset = set(deleted)
131 deletedset = set(deleted)
132 d = mf1.diff(mf2, match=match, clean=listclean)
132 d = mf1.diff(mf2, match=match, clean=listclean)
133 for fn, value in d.iteritems():
133 for fn, value in d.iteritems():
134 if fn in deletedset:
134 if fn in deletedset:
135 continue
135 continue
136 if value is None:
136 if value is None:
137 clean.append(fn)
137 clean.append(fn)
138 continue
138 continue
139 (node1, flag1), (node2, flag2) = value
139 (node1, flag1), (node2, flag2) = value
140 if node1 is None:
140 if node1 is None:
141 added.append(fn)
141 added.append(fn)
142 elif node2 is None:
142 elif node2 is None:
143 removed.append(fn)
143 removed.append(fn)
144 elif flag1 != flag2:
144 elif flag1 != flag2:
145 modified.append(fn)
145 modified.append(fn)
146 elif node2 not in wdirnodes:
146 elif node2 not in wdirnodes:
147 # When comparing files between two commits, we save time by
147 # When comparing files between two commits, we save time by
148 # not comparing the file contents when the nodeids differ.
148 # not comparing the file contents when the nodeids differ.
149 # Note that this means we incorrectly report a reverted change
149 # Note that this means we incorrectly report a reverted change
150 # to a file as a modification.
150 # to a file as a modification.
151 modified.append(fn)
151 modified.append(fn)
152 elif self[fn].cmp(other[fn]):
152 elif self[fn].cmp(other[fn]):
153 modified.append(fn)
153 modified.append(fn)
154 else:
154 else:
155 clean.append(fn)
155 clean.append(fn)
156
156
157 if removed:
157 if removed:
158 # need to filter files if they are already reported as removed
158 # need to filter files if they are already reported as removed
159 unknown = [fn for fn in unknown if fn not in mf1 and
159 unknown = [fn for fn in unknown if fn not in mf1 and
160 (not match or match(fn))]
160 (not match or match(fn))]
161 ignored = [fn for fn in ignored if fn not in mf1 and
161 ignored = [fn for fn in ignored if fn not in mf1 and
162 (not match or match(fn))]
162 (not match or match(fn))]
163 # if they're deleted, don't report them as removed
163 # if they're deleted, don't report them as removed
164 removed = [fn for fn in removed if fn not in deletedset]
164 removed = [fn for fn in removed if fn not in deletedset]
165
165
166 return scmutil.status(modified, added, removed, deleted, unknown,
166 return scmutil.status(modified, added, removed, deleted, unknown,
167 ignored, clean)
167 ignored, clean)
168
168
169 @propertycache
169 @propertycache
170 def substate(self):
170 def substate(self):
171 return subrepo.state(self, self._repo.ui)
171 return subrepo.state(self, self._repo.ui)
172
172
173 def subrev(self, subpath):
173 def subrev(self, subpath):
174 return self.substate[subpath][1]
174 return self.substate[subpath][1]
175
175
176 def rev(self):
176 def rev(self):
177 return self._rev
177 return self._rev
178 def node(self):
178 def node(self):
179 return self._node
179 return self._node
180 def hex(self):
180 def hex(self):
181 return hex(self.node())
181 return hex(self.node())
182 def manifest(self):
182 def manifest(self):
183 return self._manifest
183 return self._manifest
184 def manifestctx(self):
184 def manifestctx(self):
185 return self._manifestctx
185 return self._manifestctx
186 def repo(self):
186 def repo(self):
187 return self._repo
187 return self._repo
188 def phasestr(self):
188 def phasestr(self):
189 return phases.phasenames[self.phase()]
189 return phases.phasenames[self.phase()]
190 def mutable(self):
190 def mutable(self):
191 return self.phase() > phases.public
191 return self.phase() > phases.public
192
192
193 def getfileset(self, expr):
193 def getfileset(self, expr):
194 return fileset.getfileset(self, expr)
194 return fileset.getfileset(self, expr)
195
195
196 def obsolete(self):
196 def obsolete(self):
197 """True if the changeset is obsolete"""
197 """True if the changeset is obsolete"""
198 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
198 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
199
199
200 def extinct(self):
200 def extinct(self):
201 """True if the changeset is extinct"""
201 """True if the changeset is extinct"""
202 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
202 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
203
203
204 def unstable(self):
204 def unstable(self):
205 msg = ("'context.unstable' is deprecated, "
205 msg = ("'context.unstable' is deprecated, "
206 "use 'context.orphan'")
206 "use 'context.orphan'")
207 self._repo.ui.deprecwarn(msg, '4.4')
207 self._repo.ui.deprecwarn(msg, '4.4')
208 return self.orphan()
208 return self.orphan()
209
209
210 def orphan(self):
210 def orphan(self):
211 """True if the changeset is not obsolete but it's ancestor are"""
211 """True if the changeset is not obsolete but it's ancestor are"""
212 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
212 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
213
213
214 def bumped(self):
214 def bumped(self):
215 msg = ("'context.bumped' is deprecated, "
215 msg = ("'context.bumped' is deprecated, "
216 "use 'context.phasedivergent'")
216 "use 'context.phasedivergent'")
217 self._repo.ui.deprecwarn(msg, '4.4')
217 self._repo.ui.deprecwarn(msg, '4.4')
218 return self.phasedivergent()
218 return self.phasedivergent()
219
219
220 def phasedivergent(self):
220 def phasedivergent(self):
221 """True if the changeset try to be a successor of a public changeset
221 """True if the changeset try to be a successor of a public changeset
222
222
223 Only non-public and non-obsolete changesets may be bumped.
223 Only non-public and non-obsolete changesets may be bumped.
224 """
224 """
225 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
225 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
226
226
227 def divergent(self):
227 def divergent(self):
228 msg = ("'context.divergent' is deprecated, "
228 msg = ("'context.divergent' is deprecated, "
229 "use 'context.contentdivergent'")
229 "use 'context.contentdivergent'")
230 self._repo.ui.deprecwarn(msg, '4.4')
230 self._repo.ui.deprecwarn(msg, '4.4')
231 return self.contentdivergent()
231 return self.contentdivergent()
232
232
233 def contentdivergent(self):
233 def contentdivergent(self):
234 """Is a successors of a changeset with multiple possible successors set
234 """Is a successors of a changeset with multiple possible successors set
235
235
236 Only non-public and non-obsolete changesets may be divergent.
236 Only non-public and non-obsolete changesets may be divergent.
237 """
237 """
238 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
238 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
239
239
240 def troubled(self):
240 def troubled(self):
241 msg = ("'context.troubled' is deprecated, "
241 msg = ("'context.troubled' is deprecated, "
242 "use 'context.isunstable'")
242 "use 'context.isunstable'")
243 self._repo.ui.deprecwarn(msg, '4.4')
243 self._repo.ui.deprecwarn(msg, '4.4')
244 return self.isunstable()
244 return self.isunstable()
245
245
246 def isunstable(self):
246 def isunstable(self):
247 """True if the changeset is either unstable, bumped or divergent"""
247 """True if the changeset is either unstable, bumped or divergent"""
248 return self.orphan() or self.phasedivergent() or self.contentdivergent()
248 return self.orphan() or self.phasedivergent() or self.contentdivergent()
249
249
250 def troubles(self):
250 def troubles(self):
251 """Keep the old version around in order to avoid breaking extensions
251 """Keep the old version around in order to avoid breaking extensions
252 about different return values.
252 about different return values.
253 """
253 """
254 msg = ("'context.troubles' is deprecated, "
254 msg = ("'context.troubles' is deprecated, "
255 "use 'context.instabilities'")
255 "use 'context.instabilities'")
256 self._repo.ui.deprecwarn(msg, '4.4')
256 self._repo.ui.deprecwarn(msg, '4.4')
257
257
258 troubles = []
258 troubles = []
259 if self.orphan():
259 if self.orphan():
260 troubles.append('orphan')
260 troubles.append('orphan')
261 if self.phasedivergent():
261 if self.phasedivergent():
262 troubles.append('bumped')
262 troubles.append('bumped')
263 if self.contentdivergent():
263 if self.contentdivergent():
264 troubles.append('divergent')
264 troubles.append('divergent')
265 return troubles
265 return troubles
266
266
267 def instabilities(self):
267 def instabilities(self):
268 """return the list of instabilities affecting this changeset.
268 """return the list of instabilities affecting this changeset.
269
269
270 Instabilities are returned as strings. possible values are:
270 Instabilities are returned as strings. possible values are:
271 - orphan,
271 - orphan,
272 - phase-divergent,
272 - phase-divergent,
273 - content-divergent.
273 - content-divergent.
274 """
274 """
275 instabilities = []
275 instabilities = []
276 if self.orphan():
276 if self.orphan():
277 instabilities.append('orphan')
277 instabilities.append('orphan')
278 if self.phasedivergent():
278 if self.phasedivergent():
279 instabilities.append('phase-divergent')
279 instabilities.append('phase-divergent')
280 if self.contentdivergent():
280 if self.contentdivergent():
281 instabilities.append('content-divergent')
281 instabilities.append('content-divergent')
282 return instabilities
282 return instabilities
283
283
284 def parents(self):
284 def parents(self):
285 """return contexts for each parent changeset"""
285 """return contexts for each parent changeset"""
286 return self._parents
286 return self._parents
287
287
288 def p1(self):
288 def p1(self):
289 return self._parents[0]
289 return self._parents[0]
290
290
291 def p2(self):
291 def p2(self):
292 parents = self._parents
292 parents = self._parents
293 if len(parents) == 2:
293 if len(parents) == 2:
294 return parents[1]
294 return parents[1]
295 return changectx(self._repo, nullrev)
295 return changectx(self._repo, nullrev)
296
296
297 def _fileinfo(self, path):
297 def _fileinfo(self, path):
298 if r'_manifest' in self.__dict__:
298 if r'_manifest' in self.__dict__:
299 try:
299 try:
300 return self._manifest[path], self._manifest.flags(path)
300 return self._manifest[path], self._manifest.flags(path)
301 except KeyError:
301 except KeyError:
302 raise error.ManifestLookupError(self._node, path,
302 raise error.ManifestLookupError(self._node, path,
303 _('not found in manifest'))
303 _('not found in manifest'))
304 if r'_manifestdelta' in self.__dict__ or path in self.files():
304 if r'_manifestdelta' in self.__dict__ or path in self.files():
305 if path in self._manifestdelta:
305 if path in self._manifestdelta:
306 return (self._manifestdelta[path],
306 return (self._manifestdelta[path],
307 self._manifestdelta.flags(path))
307 self._manifestdelta.flags(path))
308 mfl = self._repo.manifestlog
308 mfl = self._repo.manifestlog
309 try:
309 try:
310 node, flag = mfl[self._changeset.manifest].find(path)
310 node, flag = mfl[self._changeset.manifest].find(path)
311 except KeyError:
311 except KeyError:
312 raise error.ManifestLookupError(self._node, path,
312 raise error.ManifestLookupError(self._node, path,
313 _('not found in manifest'))
313 _('not found in manifest'))
314
314
315 return node, flag
315 return node, flag
316
316
317 def filenode(self, path):
317 def filenode(self, path):
318 return self._fileinfo(path)[0]
318 return self._fileinfo(path)[0]
319
319
320 def flags(self, path):
320 def flags(self, path):
321 try:
321 try:
322 return self._fileinfo(path)[1]
322 return self._fileinfo(path)[1]
323 except error.LookupError:
323 except error.LookupError:
324 return ''
324 return ''
325
325
326 def sub(self, path, allowcreate=True):
326 def sub(self, path, allowcreate=True):
327 '''return a subrepo for the stored revision of path, never wdir()'''
327 '''return a subrepo for the stored revision of path, never wdir()'''
328 return subrepo.subrepo(self, path, allowcreate=allowcreate)
328 return subrepo.subrepo(self, path, allowcreate=allowcreate)
329
329
330 def nullsub(self, path, pctx):
330 def nullsub(self, path, pctx):
331 return subrepo.nullsubrepo(self, path, pctx)
331 return subrepo.nullsubrepo(self, path, pctx)
332
332
333 def workingsub(self, path):
333 def workingsub(self, path):
334 '''return a subrepo for the stored revision, or wdir if this is a wdir
334 '''return a subrepo for the stored revision, or wdir if this is a wdir
335 context.
335 context.
336 '''
336 '''
337 return subrepo.subrepo(self, path, allowwdir=True)
337 return subrepo.subrepo(self, path, allowwdir=True)
338
338
339 def match(self, pats=None, include=None, exclude=None, default='glob',
339 def match(self, pats=None, include=None, exclude=None, default='glob',
340 listsubrepos=False, badfn=None):
340 listsubrepos=False, badfn=None):
341 r = self._repo
341 r = self._repo
342 return matchmod.match(r.root, r.getcwd(), pats,
342 return matchmod.match(r.root, r.getcwd(), pats,
343 include, exclude, default,
343 include, exclude, default,
344 auditor=r.nofsauditor, ctx=self,
344 auditor=r.nofsauditor, ctx=self,
345 listsubrepos=listsubrepos, badfn=badfn)
345 listsubrepos=listsubrepos, badfn=badfn)
346
346
347 def diff(self, ctx2=None, match=None, **opts):
347 def diff(self, ctx2=None, match=None, **opts):
348 """Returns a diff generator for the given contexts and matcher"""
348 """Returns a diff generator for the given contexts and matcher"""
349 if ctx2 is None:
349 if ctx2 is None:
350 ctx2 = self.p1()
350 ctx2 = self.p1()
351 if ctx2 is not None:
351 if ctx2 is not None:
352 ctx2 = self._repo[ctx2]
352 ctx2 = self._repo[ctx2]
353 diffopts = patch.diffopts(self._repo.ui, opts)
353 diffopts = patch.diffopts(self._repo.ui, opts)
354 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
354 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
355
355
356 def dirs(self):
356 def dirs(self):
357 return self._manifest.dirs()
357 return self._manifest.dirs()
358
358
359 def hasdir(self, dir):
359 def hasdir(self, dir):
360 return self._manifest.hasdir(dir)
360 return self._manifest.hasdir(dir)
361
361
362 def status(self, other=None, match=None, listignored=False,
362 def status(self, other=None, match=None, listignored=False,
363 listclean=False, listunknown=False, listsubrepos=False):
363 listclean=False, listunknown=False, listsubrepos=False):
364 """return status of files between two nodes or node and working
364 """return status of files between two nodes or node and working
365 directory.
365 directory.
366
366
367 If other is None, compare this node with working directory.
367 If other is None, compare this node with working directory.
368
368
369 returns (modified, added, removed, deleted, unknown, ignored, clean)
369 returns (modified, added, removed, deleted, unknown, ignored, clean)
370 """
370 """
371
371
372 ctx1 = self
372 ctx1 = self
373 ctx2 = self._repo[other]
373 ctx2 = self._repo[other]
374
374
375 # This next code block is, admittedly, fragile logic that tests for
375 # This next code block is, admittedly, fragile logic that tests for
376 # reversing the contexts and wouldn't need to exist if it weren't for
376 # reversing the contexts and wouldn't need to exist if it weren't for
377 # the fast (and common) code path of comparing the working directory
377 # the fast (and common) code path of comparing the working directory
378 # with its first parent.
378 # with its first parent.
379 #
379 #
380 # What we're aiming for here is the ability to call:
380 # What we're aiming for here is the ability to call:
381 #
381 #
382 # workingctx.status(parentctx)
382 # workingctx.status(parentctx)
383 #
383 #
384 # If we always built the manifest for each context and compared those,
384 # If we always built the manifest for each context and compared those,
385 # then we'd be done. But the special case of the above call means we
385 # then we'd be done. But the special case of the above call means we
386 # just copy the manifest of the parent.
386 # just copy the manifest of the parent.
387 reversed = False
387 reversed = False
388 if (not isinstance(ctx1, changectx)
388 if (not isinstance(ctx1, changectx)
389 and isinstance(ctx2, changectx)):
389 and isinstance(ctx2, changectx)):
390 reversed = True
390 reversed = True
391 ctx1, ctx2 = ctx2, ctx1
391 ctx1, ctx2 = ctx2, ctx1
392
392
393 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
393 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
394 match = ctx2._matchstatus(ctx1, match)
394 match = ctx2._matchstatus(ctx1, match)
395 r = scmutil.status([], [], [], [], [], [], [])
395 r = scmutil.status([], [], [], [], [], [], [])
396 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
396 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
397 listunknown)
397 listunknown)
398
398
399 if reversed:
399 if reversed:
400 # Reverse added and removed. Clear deleted, unknown and ignored as
400 # Reverse added and removed. Clear deleted, unknown and ignored as
401 # these make no sense to reverse.
401 # these make no sense to reverse.
402 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
402 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
403 r.clean)
403 r.clean)
404
404
405 if listsubrepos:
405 if listsubrepos:
406 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
406 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
407 try:
407 try:
408 rev2 = ctx2.subrev(subpath)
408 rev2 = ctx2.subrev(subpath)
409 except KeyError:
409 except KeyError:
410 # A subrepo that existed in node1 was deleted between
410 # A subrepo that existed in node1 was deleted between
411 # node1 and node2 (inclusive). Thus, ctx2's substate
411 # node1 and node2 (inclusive). Thus, ctx2's substate
412 # won't contain that subpath. The best we can do ignore it.
412 # won't contain that subpath. The best we can do ignore it.
413 rev2 = None
413 rev2 = None
414 submatch = matchmod.subdirmatcher(subpath, match)
414 submatch = matchmod.subdirmatcher(subpath, match)
415 s = sub.status(rev2, match=submatch, ignored=listignored,
415 s = sub.status(rev2, match=submatch, ignored=listignored,
416 clean=listclean, unknown=listunknown,
416 clean=listclean, unknown=listunknown,
417 listsubrepos=True)
417 listsubrepos=True)
418 for rfiles, sfiles in zip(r, s):
418 for rfiles, sfiles in zip(r, s):
419 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
419 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
420
420
421 for l in r:
421 for l in r:
422 l.sort()
422 l.sort()
423
423
424 return r
424 return r
425
425
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):
        msg = _("hidden revision '%s'") % changeid
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
438
438
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # looks like a binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # try an integer revision number (possibly negative)
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # looks like a full hex node id
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: partial hex node id prefix
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
699
699
700 class basefilectx(object):
700 class basefilectx(object):
701 """A filecontext object represents the common logic for its children:
701 """A filecontext object represents the common logic for its children:
702 filectx: read-only access to a filerevision that is already present
702 filectx: read-only access to a filerevision that is already present
703 in the repo,
703 in the repo,
704 workingfilectx: a filecontext that represents files from the working
704 workingfilectx: a filecontext that represents files from the working
705 directory,
705 directory,
706 memfilectx: a filecontext that represents files in-memory,
706 memfilectx: a filecontext that represents files in-memory,
707 overlayfilectx: duplicate another filecontext with some fields overridden.
707 overlayfilectx: duplicate another filecontext with some fields overridden.
708 """
708 """
709 @propertycache
709 @propertycache
710 def _filelog(self):
710 def _filelog(self):
711 return self._repo.file(self._path)
711 return self._repo.file(self._path)
712
712
713 @propertycache
713 @propertycache
714 def _changeid(self):
714 def _changeid(self):
715 if r'_changeid' in self.__dict__:
715 if r'_changeid' in self.__dict__:
716 return self._changeid
716 return self._changeid
717 elif r'_changectx' in self.__dict__:
717 elif r'_changectx' in self.__dict__:
718 return self._changectx.rev()
718 return self._changectx.rev()
719 elif r'_descendantrev' in self.__dict__:
719 elif r'_descendantrev' in self.__dict__:
720 # this file context was created from a revision with a known
720 # this file context was created from a revision with a known
721 # descendant, we can (lazily) correct for linkrev aliases
721 # descendant, we can (lazily) correct for linkrev aliases
722 return self._adjustlinkrev(self._descendantrev)
722 return self._adjustlinkrev(self._descendantrev)
723 else:
723 else:
724 return self._filelog.linkrev(self._filerev)
724 return self._filelog.linkrev(self._filerev)
725
725
726 @propertycache
726 @propertycache
727 def _filenode(self):
727 def _filenode(self):
728 if r'_fileid' in self.__dict__:
728 if r'_fileid' in self.__dict__:
729 return self._filelog.lookup(self._fileid)
729 return self._filelog.lookup(self._fileid)
730 else:
730 else:
731 return self._changectx.filenode(self._path)
731 return self._changectx.filenode(self._path)
732
732
733 @propertycache
733 @propertycache
734 def _filerev(self):
734 def _filerev(self):
735 return self._filelog.rev(self._filenode)
735 return self._filelog.rev(self._filenode)
736
736
737 @propertycache
737 @propertycache
738 def _repopath(self):
738 def _repopath(self):
739 return self._path
739 return self._path
740
740
741 def __nonzero__(self):
741 def __nonzero__(self):
742 try:
742 try:
743 self._filenode
743 self._filenode
744 return True
744 return True
745 except error.LookupError:
745 except error.LookupError:
746 # file is missing
746 # file is missing
747 return False
747 return False
748
748
749 __bool__ = __nonzero__
749 __bool__ = __nonzero__
750
750
751 def __bytes__(self):
751 def __bytes__(self):
752 try:
752 try:
753 return "%s@%s" % (self.path(), self._changectx)
753 return "%s@%s" % (self.path(), self._changectx)
754 except error.LookupError:
754 except error.LookupError:
755 return "%s@???" % self.path()
755 return "%s@???" % self.path()
756
756
757 __str__ = encoding.strmethod(__bytes__)
757 __str__ = encoding.strmethod(__bytes__)
758
758
759 def __repr__(self):
759 def __repr__(self):
760 return "<%s %s>" % (type(self).__name__, str(self))
760 return "<%s %s>" % (type(self).__name__, str(self))
761
761
762 def __hash__(self):
762 def __hash__(self):
763 try:
763 try:
764 return hash((self._path, self._filenode))
764 return hash((self._path, self._filenode))
765 except AttributeError:
765 except AttributeError:
766 return id(self)
766 return id(self)
767
767
768 def __eq__(self, other):
768 def __eq__(self, other):
769 try:
769 try:
770 return (type(self) == type(other) and self._path == other._path
770 return (type(self) == type(other) and self._path == other._path
771 and self._filenode == other._filenode)
771 and self._filenode == other._filenode)
772 except AttributeError:
772 except AttributeError:
773 return False
773 return False
774
774
775 def __ne__(self, other):
775 def __ne__(self, other):
776 return not (self == other)
776 return not (self == other)
777
777
778 def filerev(self):
778 def filerev(self):
779 return self._filerev
779 return self._filerev
780 def filenode(self):
780 def filenode(self):
781 return self._filenode
781 return self._filenode
782 @propertycache
782 @propertycache
783 def _flags(self):
783 def _flags(self):
784 return self._changectx.flags(self._path)
784 return self._changectx.flags(self._path)
785 def flags(self):
785 def flags(self):
786 return self._flags
786 return self._flags
787 def filelog(self):
787 def filelog(self):
788 return self._filelog
788 return self._filelog
789 def rev(self):
789 def rev(self):
790 return self._changeid
790 return self._changeid
791 def linkrev(self):
791 def linkrev(self):
792 return self._filelog.linkrev(self._filerev)
792 return self._filelog.linkrev(self._filerev)
793 def node(self):
793 def node(self):
794 return self._changectx.node()
794 return self._changectx.node()
795 def hex(self):
795 def hex(self):
796 return self._changectx.hex()
796 return self._changectx.hex()
797 def user(self):
797 def user(self):
798 return self._changectx.user()
798 return self._changectx.user()
799 def date(self):
799 def date(self):
800 return self._changectx.date()
800 return self._changectx.date()
801 def files(self):
801 def files(self):
802 return self._changectx.files()
802 return self._changectx.files()
803 def description(self):
803 def description(self):
804 return self._changectx.description()
804 return self._changectx.description()
805 def branch(self):
805 def branch(self):
806 return self._changectx.branch()
806 return self._changectx.branch()
807 def extra(self):
807 def extra(self):
808 return self._changectx.extra()
808 return self._changectx.extra()
809 def phase(self):
809 def phase(self):
810 return self._changectx.phase()
810 return self._changectx.phase()
811 def phasestr(self):
811 def phasestr(self):
812 return self._changectx.phasestr()
812 return self._changectx.phasestr()
813 def manifest(self):
813 def manifest(self):
814 return self._changectx.manifest()
814 return self._changectx.manifest()
815 def changectx(self):
815 def changectx(self):
816 return self._changectx
816 return self._changectx
817 def renamed(self):
817 def renamed(self):
818 return self._copied
818 return self._copied
819 def repo(self):
819 def repo(self):
820 return self._repo
820 return self._repo
821 def size(self):
821 def size(self):
822 return len(self.data())
822 return len(self.data())
823
823
824 def path(self):
824 def path(self):
825 return self._path
825 return self._path
826
826
827 def isbinary(self):
827 def isbinary(self):
828 try:
828 try:
829 return util.binary(self.data())
829 return util.binary(self.data())
830 except IOError:
830 except IOError:
831 return False
831 return False
832 def isexec(self):
832 def isexec(self):
833 return 'x' in self.flags()
833 return 'x' in self.flags()
834 def islink(self):
834 def islink(self):
835 return 'l' in self.flags()
835 return 'l' in self.flags()
836
836
837 def isabsent(self):
837 def isabsent(self):
838 """whether this filectx represents a file not in self._changectx
838 """whether this filectx represents a file not in self._changectx
839
839
840 This is mainly for merge code to detect change/delete conflicts. This is
840 This is mainly for merge code to detect change/delete conflicts. This is
841 expected to be True for all subclasses of basectx."""
841 expected to be True for all subclasses of basectx."""
842 return False
842 return False
843
843
844 _customcmp = False
844 _customcmp = False
845 def cmp(self, fctx):
845 def cmp(self, fctx):
846 """compare with other file context
846 """compare with other file context
847
847
848 returns True if different than fctx.
848 returns True if different than fctx.
849 """
849 """
850 if fctx._customcmp:
850 if fctx._customcmp:
851 return fctx.cmp(self)
851 return fctx.cmp(self)
852
852
853 if (fctx._filenode is None
853 if (fctx._filenode is None
854 and (self._repo._encodefilterpats
854 and (self._repo._encodefilterpats
855 # if file data starts with '\1\n', empty metadata block is
855 # if file data starts with '\1\n', empty metadata block is
856 # prepended, which adds 4 bytes to filelog.size().
856 # prepended, which adds 4 bytes to filelog.size().
857 or self.size() - 4 == fctx.size())
857 or self.size() - 4 == fctx.size())
858 or self.size() == fctx.size()):
858 or self.size() == fctx.size()):
859 return self._filelog.cmp(self._filenode, fctx.data())
859 return self._filelog.cmp(self._filenode, fctx.data())
860
860
861 return True
861 return True
862
862
863 def _adjustlinkrev(self, srcrev, inclusive=False):
863 def _adjustlinkrev(self, srcrev, inclusive=False):
864 """return the first ancestor of <srcrev> introducing <fnode>
864 """return the first ancestor of <srcrev> introducing <fnode>
865
865
866 If the linkrev of the file revision does not point to an ancestor of
866 If the linkrev of the file revision does not point to an ancestor of
867 srcrev, we'll walk down the ancestors until we find one introducing
867 srcrev, we'll walk down the ancestors until we find one introducing
868 this file revision.
868 this file revision.
869
869
870 :srcrev: the changeset revision we search ancestors from
870 :srcrev: the changeset revision we search ancestors from
871 :inclusive: if true, the src revision will also be checked
871 :inclusive: if true, the src revision will also be checked
872 """
872 """
873 repo = self._repo
873 repo = self._repo
874 cl = repo.unfiltered().changelog
874 cl = repo.unfiltered().changelog
875 mfl = repo.manifestlog
875 mfl = repo.manifestlog
876 # fetch the linkrev
876 # fetch the linkrev
877 lkr = self.linkrev()
877 lkr = self.linkrev()
878 # hack to reuse ancestor computation when searching for renames
878 # hack to reuse ancestor computation when searching for renames
879 memberanc = getattr(self, '_ancestrycontext', None)
879 memberanc = getattr(self, '_ancestrycontext', None)
880 iteranc = None
880 iteranc = None
881 if srcrev is None:
881 if srcrev is None:
882 # wctx case, used by workingfilectx during mergecopy
882 # wctx case, used by workingfilectx during mergecopy
883 revs = [p.rev() for p in self._repo[None].parents()]
883 revs = [p.rev() for p in self._repo[None].parents()]
884 inclusive = True # we skipped the real (revless) source
884 inclusive = True # we skipped the real (revless) source
885 else:
885 else:
886 revs = [srcrev]
886 revs = [srcrev]
887 if memberanc is None:
887 if memberanc is None:
888 memberanc = iteranc = cl.ancestors(revs, lkr,
888 memberanc = iteranc = cl.ancestors(revs, lkr,
889 inclusive=inclusive)
889 inclusive=inclusive)
890 # check if this linkrev is an ancestor of srcrev
890 # check if this linkrev is an ancestor of srcrev
891 if lkr not in memberanc:
891 if lkr not in memberanc:
892 if iteranc is None:
892 if iteranc is None:
893 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
893 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
894 fnode = self._filenode
894 fnode = self._filenode
895 path = self._path
895 path = self._path
896 for a in iteranc:
896 for a in iteranc:
897 ac = cl.read(a) # get changeset data (we avoid object creation)
897 ac = cl.read(a) # get changeset data (we avoid object creation)
898 if path in ac[3]: # checking the 'files' field.
898 if path in ac[3]: # checking the 'files' field.
899 # The file has been touched, check if the content is
899 # The file has been touched, check if the content is
900 # similar to the one we search for.
900 # similar to the one we search for.
901 if fnode == mfl[ac[0]].readfast().get(path):
901 if fnode == mfl[ac[0]].readfast().get(path):
902 return a
902 return a
903 # In theory, we should never get out of that loop without a result.
903 # In theory, we should never get out of that loop without a result.
904 # But if manifest uses a buggy file revision (not children of the
904 # But if manifest uses a buggy file revision (not children of the
905 # one it replaces) we could. Such a buggy situation will likely
905 # one it replaces) we could. Such a buggy situation will likely
906 # result is crash somewhere else at to some point.
906 # result is crash somewhere else at to some point.
907 return lkr
907 return lkr
908
908
909 def introrev(self):
909 def introrev(self):
910 """return the rev of the changeset which introduced this file revision
910 """return the rev of the changeset which introduced this file revision
911
911
912 This method is different from linkrev because it take into account the
912 This method is different from linkrev because it take into account the
913 changeset the filectx was created from. It ensures the returned
913 changeset the filectx was created from. It ensures the returned
914 revision is one of its ancestors. This prevents bugs from
914 revision is one of its ancestors. This prevents bugs from
915 'linkrev-shadowing' when a file revision is used by multiple
915 'linkrev-shadowing' when a file revision is used by multiple
916 changesets.
916 changesets.
917 """
917 """
918 lkr = self.linkrev()
918 lkr = self.linkrev()
919 attrs = vars(self)
919 attrs = vars(self)
920 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
920 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
921 if noctx or self.rev() == lkr:
921 if noctx or self.rev() == lkr:
922 return self.linkrev()
922 return self.linkrev()
923 return self._adjustlinkrev(self.rev(), inclusive=True)
923 return self._adjustlinkrev(self.rev(), inclusive=True)
924
924
925 def _parentfilectx(self, path, fileid, filelog):
925 def _parentfilectx(self, path, fileid, filelog):
926 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
926 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
927 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
927 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
928 if '_changeid' in vars(self) or '_changectx' in vars(self):
928 if '_changeid' in vars(self) or '_changectx' in vars(self):
929 # If self is associated with a changeset (probably explicitly
929 # If self is associated with a changeset (probably explicitly
930 # fed), ensure the created filectx is associated with a
930 # fed), ensure the created filectx is associated with a
931 # changeset that is an ancestor of self.changectx.
931 # changeset that is an ancestor of self.changectx.
932 # This lets us later use _adjustlinkrev to get a correct link.
932 # This lets us later use _adjustlinkrev to get a correct link.
933 fctx._descendantrev = self.rev()
933 fctx._descendantrev = self.rev()
934 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
934 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
935 elif '_descendantrev' in vars(self):
935 elif '_descendantrev' in vars(self):
936 # Otherwise propagate _descendantrev if we have one associated.
936 # Otherwise propagate _descendantrev if we have one associated.
937 fctx._descendantrev = self._descendantrev
937 fctx._descendantrev = self._descendantrev
938 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
938 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
939 return fctx
939 return fctx
940
940
941 def parents(self):
941 def parents(self):
942 _path = self._path
942 _path = self._path
943 fl = self._filelog
943 fl = self._filelog
944 parents = self._filelog.parents(self._filenode)
944 parents = self._filelog.parents(self._filenode)
945 pl = [(_path, node, fl) for node in parents if node != nullid]
945 pl = [(_path, node, fl) for node in parents if node != nullid]
946
946
947 r = fl.renamed(self._filenode)
947 r = fl.renamed(self._filenode)
948 if r:
948 if r:
949 # - In the simple rename case, both parent are nullid, pl is empty.
949 # - In the simple rename case, both parent are nullid, pl is empty.
950 # - In case of merge, only one of the parent is null id and should
950 # - In case of merge, only one of the parent is null id and should
951 # be replaced with the rename information. This parent is -always-
951 # be replaced with the rename information. This parent is -always-
952 # the first one.
952 # the first one.
953 #
953 #
954 # As null id have always been filtered out in the previous list
954 # As null id have always been filtered out in the previous list
955 # comprehension, inserting to 0 will always result in "replacing
955 # comprehension, inserting to 0 will always result in "replacing
956 # first nullid parent with rename information.
956 # first nullid parent with rename information.
957 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
957 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
958
958
959 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
959 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
960
960
961 def p1(self):
961 def p1(self):
962 return self.parents()[0]
962 return self.parents()[0]
963
963
964 def p2(self):
964 def p2(self):
965 p = self.parents()
965 p = self.parents()
966 if len(p) == 2:
966 if len(p) == 2:
967 return p[1]
967 return p[1]
968 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
968 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
969
969
970 def annotate(self, follow=False, linenumber=False, skiprevs=None,
970 def annotate(self, follow=False, linenumber=False, skiprevs=None,
971 diffopts=None):
971 diffopts=None):
972 '''returns a list of tuples of ((ctx, number), line) for each line
972 '''returns a list of tuples of ((ctx, number), line) for each line
973 in the file, where ctx is the filectx of the node where
973 in the file, where ctx is the filectx of the node where
974 that line was last changed; if linenumber parameter is true, number is
974 that line was last changed; if linenumber parameter is true, number is
975 the line number at the first appearance in the managed file, otherwise,
975 the line number at the first appearance in the managed file, otherwise,
976 number has a fixed value of False.
976 number has a fixed value of False.
977 '''
977 '''
978
978
979 def lines(text):
979 def lines(text):
980 if text.endswith("\n"):
980 if text.endswith("\n"):
981 return text.count("\n")
981 return text.count("\n")
982 return text.count("\n") + int(bool(text))
982 return text.count("\n") + int(bool(text))
983
983
984 if linenumber:
984 if linenumber:
985 def decorate(text, rev):
985 def decorate(text, rev):
986 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
986 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
987 else:
987 else:
988 def decorate(text, rev):
988 def decorate(text, rev):
989 return ([(rev, False)] * lines(text), text)
989 return ([(rev, False)] * lines(text), text)
990
990
991 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
991 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
992
992
993 def parents(f):
993 def parents(f):
994 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
994 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
995 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
995 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
996 # from the topmost introrev (= srcrev) down to p.linkrev() if it
996 # from the topmost introrev (= srcrev) down to p.linkrev() if it
997 # isn't an ancestor of the srcrev.
997 # isn't an ancestor of the srcrev.
998 f._changeid
998 f._changeid
999 pl = f.parents()
999 pl = f.parents()
1000
1000
1001 # Don't return renamed parents if we aren't following.
1001 # Don't return renamed parents if we aren't following.
1002 if not follow:
1002 if not follow:
1003 pl = [p for p in pl if p.path() == f.path()]
1003 pl = [p for p in pl if p.path() == f.path()]
1004
1004
1005 # renamed filectx won't have a filelog yet, so set it
1005 # renamed filectx won't have a filelog yet, so set it
1006 # from the cache to save time
1006 # from the cache to save time
1007 for p in pl:
1007 for p in pl:
1008 if not '_filelog' in p.__dict__:
1008 if not '_filelog' in p.__dict__:
1009 p._filelog = getlog(p.path())
1009 p._filelog = getlog(p.path())
1010
1010
1011 return pl
1011 return pl
1012
1012
1013 # use linkrev to find the first changeset where self appeared
1013 # use linkrev to find the first changeset where self appeared
1014 base = self
1014 base = self
1015 introrev = self.introrev()
1015 introrev = self.introrev()
1016 if self.rev() != introrev:
1016 if self.rev() != introrev:
1017 base = self.filectx(self.filenode(), changeid=introrev)
1017 base = self.filectx(self.filenode(), changeid=introrev)
1018 if getattr(base, '_ancestrycontext', None) is None:
1018 if getattr(base, '_ancestrycontext', None) is None:
1019 cl = self._repo.changelog
1019 cl = self._repo.changelog
1020 if introrev is None:
1020 if introrev is None:
1021 # wctx is not inclusive, but works because _ancestrycontext
1021 # wctx is not inclusive, but works because _ancestrycontext
1022 # is used to test filelog revisions
1022 # is used to test filelog revisions
1023 ac = cl.ancestors([p.rev() for p in base.parents()],
1023 ac = cl.ancestors([p.rev() for p in base.parents()],
1024 inclusive=True)
1024 inclusive=True)
1025 else:
1025 else:
1026 ac = cl.ancestors([introrev], inclusive=True)
1026 ac = cl.ancestors([introrev], inclusive=True)
1027 base._ancestrycontext = ac
1027 base._ancestrycontext = ac
1028
1028
1029 # This algorithm would prefer to be recursive, but Python is a
1029 # This algorithm would prefer to be recursive, but Python is a
1030 # bit recursion-hostile. Instead we do an iterative
1030 # bit recursion-hostile. Instead we do an iterative
1031 # depth-first search.
1031 # depth-first search.
1032
1032
1033 # 1st DFS pre-calculates pcache and needed
1033 # 1st DFS pre-calculates pcache and needed
1034 visit = [base]
1034 visit = [base]
1035 pcache = {}
1035 pcache = {}
1036 needed = {base: 1}
1036 needed = {base: 1}
1037 while visit:
1037 while visit:
1038 f = visit.pop()
1038 f = visit.pop()
1039 if f in pcache:
1039 if f in pcache:
1040 continue
1040 continue
1041 pl = parents(f)
1041 pl = parents(f)
1042 pcache[f] = pl
1042 pcache[f] = pl
1043 for p in pl:
1043 for p in pl:
1044 needed[p] = needed.get(p, 0) + 1
1044 needed[p] = needed.get(p, 0) + 1
1045 if p not in pcache:
1045 if p not in pcache:
1046 visit.append(p)
1046 visit.append(p)
1047
1047
1048 # 2nd DFS does the actual annotate
1048 # 2nd DFS does the actual annotate
1049 visit[:] = [base]
1049 visit[:] = [base]
1050 hist = {}
1050 hist = {}
1051 while visit:
1051 while visit:
1052 f = visit[-1]
1052 f = visit[-1]
1053 if f in hist:
1053 if f in hist:
1054 visit.pop()
1054 visit.pop()
1055 continue
1055 continue
1056
1056
1057 ready = True
1057 ready = True
1058 pl = pcache[f]
1058 pl = pcache[f]
1059 for p in pl:
1059 for p in pl:
1060 if p not in hist:
1060 if p not in hist:
1061 ready = False
1061 ready = False
1062 visit.append(p)
1062 visit.append(p)
1063 if ready:
1063 if ready:
1064 visit.pop()
1064 visit.pop()
1065 curr = decorate(f.data(), f)
1065 curr = decorate(f.data(), f)
1066 skipchild = False
1066 skipchild = False
1067 if skiprevs is not None:
1067 if skiprevs is not None:
1068 skipchild = f._changeid in skiprevs
1068 skipchild = f._changeid in skiprevs
1069 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1069 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1070 diffopts)
1070 diffopts)
1071 for p in pl:
1071 for p in pl:
1072 if needed[p] == 1:
1072 if needed[p] == 1:
1073 del hist[p]
1073 del hist[p]
1074 del needed[p]
1074 del needed[p]
1075 else:
1075 else:
1076 needed[p] -= 1
1076 needed[p] -= 1
1077
1077
1078 hist[f] = curr
1078 hist[f] = curr
1079 del pcache[f]
1079 del pcache[f]
1080
1080
1081 return zip(hist[base][0], hist[base][1].splitlines(True))
1081 return zip(hist[base][0], hist[base][1].splitlines(True))
1082
1082
1083 def ancestors(self, followfirst=False):
1083 def ancestors(self, followfirst=False):
1084 visit = {}
1084 visit = {}
1085 c = self
1085 c = self
1086 if followfirst:
1086 if followfirst:
1087 cut = 1
1087 cut = 1
1088 else:
1088 else:
1089 cut = None
1089 cut = None
1090
1090
1091 while True:
1091 while True:
1092 for parent in c.parents()[:cut]:
1092 for parent in c.parents()[:cut]:
1093 visit[(parent.linkrev(), parent.filenode())] = parent
1093 visit[(parent.linkrev(), parent.filenode())] = parent
1094 if not visit:
1094 if not visit:
1095 break
1095 break
1096 c = visit.pop(max(visit))
1096 c = visit.pop(max(visit))
1097 yield c
1097 yield c
1098
1098
1099 def decodeddata(self):
1099 def decodeddata(self):
1100 """Returns `data()` after running repository decoding filters.
1100 """Returns `data()` after running repository decoding filters.
1101
1101
1102 This is often equivalent to how the data would be expressed on disk.
1102 This is often equivalent to how the data would be expressed on disk.
1103 """
1103 """
1104 return self._repo.wwritedata(self.path(), self.data())
1104 return self._repo.wwritedata(self.path(), self.data())
1105
1105
1106 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1106 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1107 r'''
1107 r'''
1108 Given parent and child fctxes and annotate data for parents, for all lines
1108 Given parent and child fctxes and annotate data for parents, for all lines
1109 in either parent that match the child, annotate the child with the parent's
1109 in either parent that match the child, annotate the child with the parent's
1110 data.
1110 data.
1111
1111
1112 Additionally, if `skipchild` is True, replace all other lines with parent
1112 Additionally, if `skipchild` is True, replace all other lines with parent
1113 annotate data as well such that child is never blamed for any lines.
1113 annotate data as well such that child is never blamed for any lines.
1114
1114
1115 >>> oldfctx = b'old'
1115 >>> oldfctx = b'old'
1116 >>> p1fctx, p2fctx, childfctx = b'p1', b'p2', b'c'
1116 >>> p1fctx, p2fctx, childfctx = b'p1', b'p2', b'c'
1117 >>> olddata = b'a\nb\n'
1117 >>> olddata = b'a\nb\n'
1118 >>> p1data = b'a\nb\nc\n'
1118 >>> p1data = b'a\nb\nc\n'
1119 >>> p2data = b'a\nc\nd\n'
1119 >>> p2data = b'a\nc\nd\n'
1120 >>> childdata = b'a\nb2\nc\nc2\nd\n'
1120 >>> childdata = b'a\nb2\nc\nc2\nd\n'
1121 >>> diffopts = mdiff.diffopts()
1121 >>> diffopts = mdiff.diffopts()
1122
1122
1123 >>> def decorate(text, rev):
1123 >>> def decorate(text, rev):
1124 ... return ([(rev, i) for i in xrange(1, text.count(b'\n') + 1)], text)
1124 ... return ([(rev, i) for i in xrange(1, text.count(b'\n') + 1)], text)
1125
1125
1126 Basic usage:
1126 Basic usage:
1127
1127
1128 >>> oldann = decorate(olddata, oldfctx)
1128 >>> oldann = decorate(olddata, oldfctx)
1129 >>> p1ann = decorate(p1data, p1fctx)
1129 >>> p1ann = decorate(p1data, p1fctx)
1130 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1130 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1131 >>> p1ann[0]
1131 >>> p1ann[0]
1132 [('old', 1), ('old', 2), ('p1', 3)]
1132 [('old', 1), ('old', 2), ('p1', 3)]
1133 >>> p2ann = decorate(p2data, p2fctx)
1133 >>> p2ann = decorate(p2data, p2fctx)
1134 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1134 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1135 >>> p2ann[0]
1135 >>> p2ann[0]
1136 [('old', 1), ('p2', 2), ('p2', 3)]
1136 [('old', 1), ('p2', 2), ('p2', 3)]
1137
1137
1138 Test with multiple parents (note the difference caused by ordering):
1138 Test with multiple parents (note the difference caused by ordering):
1139
1139
1140 >>> childann = decorate(childdata, childfctx)
1140 >>> childann = decorate(childdata, childfctx)
1141 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1141 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1142 ... diffopts)
1142 ... diffopts)
1143 >>> childann[0]
1143 >>> childann[0]
1144 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1144 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1145
1145
1146 >>> childann = decorate(childdata, childfctx)
1146 >>> childann = decorate(childdata, childfctx)
1147 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1147 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1148 ... diffopts)
1148 ... diffopts)
1149 >>> childann[0]
1149 >>> childann[0]
1150 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1150 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1151
1151
1152 Test with skipchild (note the difference caused by ordering):
1152 Test with skipchild (note the difference caused by ordering):
1153
1153
1154 >>> childann = decorate(childdata, childfctx)
1154 >>> childann = decorate(childdata, childfctx)
1155 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1155 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1156 ... diffopts)
1156 ... diffopts)
1157 >>> childann[0]
1157 >>> childann[0]
1158 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1158 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1159
1159
1160 >>> childann = decorate(childdata, childfctx)
1160 >>> childann = decorate(childdata, childfctx)
1161 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1161 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1162 ... diffopts)
1162 ... diffopts)
1163 >>> childann[0]
1163 >>> childann[0]
1164 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1164 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1165 '''
1165 '''
1166 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1166 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1167 for parent in parents]
1167 for parent in parents]
1168
1168
1169 if skipchild:
1169 if skipchild:
1170 # Need to iterate over the blocks twice -- make it a list
1170 # Need to iterate over the blocks twice -- make it a list
1171 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1171 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1172 # Mercurial currently prefers p2 over p1 for annotate.
1172 # Mercurial currently prefers p2 over p1 for annotate.
1173 # TODO: change this?
1173 # TODO: change this?
1174 for parent, blocks in pblocks:
1174 for parent, blocks in pblocks:
1175 for (a1, a2, b1, b2), t in blocks:
1175 for (a1, a2, b1, b2), t in blocks:
1176 # Changed blocks ('!') or blocks made only of blank lines ('~')
1176 # Changed blocks ('!') or blocks made only of blank lines ('~')
1177 # belong to the child.
1177 # belong to the child.
1178 if t == '=':
1178 if t == '=':
1179 child[0][b1:b2] = parent[0][a1:a2]
1179 child[0][b1:b2] = parent[0][a1:a2]
1180
1180
1181 if skipchild:
1181 if skipchild:
1182 # Now try and match up anything that couldn't be matched,
1182 # Now try and match up anything that couldn't be matched,
1183 # Reversing pblocks maintains bias towards p2, matching above
1183 # Reversing pblocks maintains bias towards p2, matching above
1184 # behavior.
1184 # behavior.
1185 pblocks.reverse()
1185 pblocks.reverse()
1186
1186
1187 # The heuristics are:
1187 # The heuristics are:
1188 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1188 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1189 # This could potentially be smarter but works well enough.
1189 # This could potentially be smarter but works well enough.
1190 # * For a non-matching section, do a best-effort fit. Match lines in
1190 # * For a non-matching section, do a best-effort fit. Match lines in
1191 # diff hunks 1:1, dropping lines as necessary.
1191 # diff hunks 1:1, dropping lines as necessary.
1192 # * Repeat the last line as a last resort.
1192 # * Repeat the last line as a last resort.
1193
1193
1194 # First, replace as much as possible without repeating the last line.
1194 # First, replace as much as possible without repeating the last line.
1195 remaining = [(parent, []) for parent, _blocks in pblocks]
1195 remaining = [(parent, []) for parent, _blocks in pblocks]
1196 for idx, (parent, blocks) in enumerate(pblocks):
1196 for idx, (parent, blocks) in enumerate(pblocks):
1197 for (a1, a2, b1, b2), _t in blocks:
1197 for (a1, a2, b1, b2), _t in blocks:
1198 if a2 - a1 >= b2 - b1:
1198 if a2 - a1 >= b2 - b1:
1199 for bk in xrange(b1, b2):
1199 for bk in xrange(b1, b2):
1200 if child[0][bk][0] == childfctx:
1200 if child[0][bk][0] == childfctx:
1201 ak = min(a1 + (bk - b1), a2 - 1)
1201 ak = min(a1 + (bk - b1), a2 - 1)
1202 child[0][bk] = parent[0][ak]
1202 child[0][bk] = parent[0][ak]
1203 else:
1203 else:
1204 remaining[idx][1].append((a1, a2, b1, b2))
1204 remaining[idx][1].append((a1, a2, b1, b2))
1205
1205
1206 # Then, look at anything left, which might involve repeating the last
1206 # Then, look at anything left, which might involve repeating the last
1207 # line.
1207 # line.
1208 for parent, blocks in remaining:
1208 for parent, blocks in remaining:
1209 for a1, a2, b1, b2 in blocks:
1209 for a1, a2, b1, b2 in blocks:
1210 for bk in xrange(b1, b2):
1210 for bk in xrange(b1, b2):
1211 if child[0][bk][0] == childfctx:
1211 if child[0][bk][0] == childfctx:
1212 ak = min(a1 + (bk - b1), a2 - 1)
1212 ak = min(a1 + (bk - b1), a2 - 1)
1213 child[0][bk] = parent[0][ak]
1213 child[0][bk] = parent[0][ak]
1214 return child
1214 return child
1215
1215
1216 class filectx(basefilectx):
1216 class filectx(basefilectx):
1217 """A filecontext object makes access to data related to a particular
1217 """A filecontext object makes access to data related to a particular
1218 filerevision convenient."""
1218 filerevision convenient."""
1219 def __init__(self, repo, path, changeid=None, fileid=None,
1219 def __init__(self, repo, path, changeid=None, fileid=None,
1220 filelog=None, changectx=None):
1220 filelog=None, changectx=None):
1221 """changeid can be a changeset revision, node, or tag.
1221 """changeid can be a changeset revision, node, or tag.
1222 fileid can be a file revision or node."""
1222 fileid can be a file revision or node."""
1223 self._repo = repo
1223 self._repo = repo
1224 self._path = path
1224 self._path = path
1225
1225
1226 assert (changeid is not None
1226 assert (changeid is not None
1227 or fileid is not None
1227 or fileid is not None
1228 or changectx is not None), \
1228 or changectx is not None), \
1229 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1229 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1230 % (changeid, fileid, changectx))
1230 % (changeid, fileid, changectx))
1231
1231
1232 if filelog is not None:
1232 if filelog is not None:
1233 self._filelog = filelog
1233 self._filelog = filelog
1234
1234
1235 if changeid is not None:
1235 if changeid is not None:
1236 self._changeid = changeid
1236 self._changeid = changeid
1237 if changectx is not None:
1237 if changectx is not None:
1238 self._changectx = changectx
1238 self._changectx = changectx
1239 if fileid is not None:
1239 if fileid is not None:
1240 self._fileid = fileid
1240 self._fileid = fileid
1241
1241
1242 @propertycache
1242 @propertycache
1243 def _changectx(self):
1243 def _changectx(self):
1244 try:
1244 try:
1245 return changectx(self._repo, self._changeid)
1245 return changectx(self._repo, self._changeid)
1246 except error.FilteredRepoLookupError:
1246 except error.FilteredRepoLookupError:
1247 # Linkrev may point to any revision in the repository. When the
1247 # Linkrev may point to any revision in the repository. When the
1248 # repository is filtered this may lead to `filectx` trying to build
1248 # repository is filtered this may lead to `filectx` trying to build
1249 # `changectx` for filtered revision. In such case we fallback to
1249 # `changectx` for filtered revision. In such case we fallback to
1250 # creating `changectx` on the unfiltered version of the reposition.
1250 # creating `changectx` on the unfiltered version of the reposition.
1251 # This fallback should not be an issue because `changectx` from
1251 # This fallback should not be an issue because `changectx` from
1252 # `filectx` are not used in complex operations that care about
1252 # `filectx` are not used in complex operations that care about
1253 # filtering.
1253 # filtering.
1254 #
1254 #
1255 # This fallback is a cheap and dirty fix that prevent several
1255 # This fallback is a cheap and dirty fix that prevent several
1256 # crashes. It does not ensure the behavior is correct. However the
1256 # crashes. It does not ensure the behavior is correct. However the
1257 # behavior was not correct before filtering either and "incorrect
1257 # behavior was not correct before filtering either and "incorrect
1258 # behavior" is seen as better as "crash"
1258 # behavior" is seen as better as "crash"
1259 #
1259 #
1260 # Linkrevs have several serious troubles with filtering that are
1260 # Linkrevs have several serious troubles with filtering that are
1261 # complicated to solve. Proper handling of the issue here should be
1261 # complicated to solve. Proper handling of the issue here should be
1262 # considered when solving linkrev issue are on the table.
1262 # considered when solving linkrev issue are on the table.
1263 return changectx(self._repo.unfiltered(), self._changeid)
1263 return changectx(self._repo.unfiltered(), self._changeid)
1264
1264
1265 def filectx(self, fileid, changeid=None):
1265 def filectx(self, fileid, changeid=None):
1266 '''opens an arbitrary revision of the file without
1266 '''opens an arbitrary revision of the file without
1267 opening a new filelog'''
1267 opening a new filelog'''
1268 return filectx(self._repo, self._path, fileid=fileid,
1268 return filectx(self._repo, self._path, fileid=fileid,
1269 filelog=self._filelog, changeid=changeid)
1269 filelog=self._filelog, changeid=changeid)
1270
1270
1271 def rawdata(self):
1271 def rawdata(self):
1272 return self._filelog.revision(self._filenode, raw=True)
1272 return self._filelog.revision(self._filenode, raw=True)
1273
1273
1274 def rawflags(self):
1274 def rawflags(self):
1275 """low-level revlog flags"""
1275 """low-level revlog flags"""
1276 return self._filelog.flags(self._filerev)
1276 return self._filelog.flags(self._filerev)
1277
1277
1278 def data(self):
1278 def data(self):
1279 try:
1279 try:
1280 return self._filelog.read(self._filenode)
1280 return self._filelog.read(self._filenode)
1281 except error.CensoredNodeError:
1281 except error.CensoredNodeError:
1282 if self._repo.ui.config("censor", "policy") == "ignore":
1282 if self._repo.ui.config("censor", "policy") == "ignore":
1283 return ""
1283 return ""
1284 raise error.Abort(_("censored node: %s") % short(self._filenode),
1284 raise error.Abort(_("censored node: %s") % short(self._filenode),
1285 hint=_("set censor.policy to ignore errors"))
1285 hint=_("set censor.policy to ignore errors"))
1286
1286
1287 def size(self):
1287 def size(self):
1288 return self._filelog.size(self._filerev)
1288 return self._filelog.size(self._filerev)
1289
1289
1290 @propertycache
1290 @propertycache
1291 def _copied(self):
1291 def _copied(self):
1292 """check if file was actually renamed in this changeset revision
1292 """check if file was actually renamed in this changeset revision
1293
1293
1294 If rename logged in file revision, we report copy for changeset only
1294 If rename logged in file revision, we report copy for changeset only
1295 if file revisions linkrev points back to the changeset in question
1295 if file revisions linkrev points back to the changeset in question
1296 or both changeset parents contain different file revisions.
1296 or both changeset parents contain different file revisions.
1297 """
1297 """
1298
1298
1299 renamed = self._filelog.renamed(self._filenode)
1299 renamed = self._filelog.renamed(self._filenode)
1300 if not renamed:
1300 if not renamed:
1301 return renamed
1301 return renamed
1302
1302
1303 if self.rev() == self.linkrev():
1303 if self.rev() == self.linkrev():
1304 return renamed
1304 return renamed
1305
1305
1306 name = self.path()
1306 name = self.path()
1307 fnode = self._filenode
1307 fnode = self._filenode
1308 for p in self._changectx.parents():
1308 for p in self._changectx.parents():
1309 try:
1309 try:
1310 if fnode == p.filenode(name):
1310 if fnode == p.filenode(name):
1311 return None
1311 return None
1312 except error.LookupError:
1312 except error.LookupError:
1313 pass
1313 pass
1314 return renamed
1314 return renamed
1315
1315
1316 def children(self):
1316 def children(self):
1317 # hard for renames
1317 # hard for renames
1318 c = self._filelog.children(self._filenode)
1318 c = self._filelog.children(self._filenode)
1319 return [filectx(self._repo, self._path, fileid=x,
1319 return [filectx(self._repo, self._path, fileid=x,
1320 filelog=self._filelog) for x in c]
1320 filelog=self._filelog) for x in c]
1321
1321
1322 class committablectx(basectx):
1322 class committablectx(basectx):
1323 """A committablectx object provides common functionality for a context that
1323 """A committablectx object provides common functionality for a context that
1324 wants the ability to commit, e.g. workingctx or memctx."""
1324 wants the ability to commit, e.g. workingctx or memctx."""
1325 def __init__(self, repo, text="", user=None, date=None, extra=None,
1325 def __init__(self, repo, text="", user=None, date=None, extra=None,
1326 changes=None):
1326 changes=None):
1327 self._repo = repo
1327 self._repo = repo
1328 self._rev = None
1328 self._rev = None
1329 self._node = None
1329 self._node = None
1330 self._text = text
1330 self._text = text
1331 if date:
1331 if date:
1332 self._date = util.parsedate(date)
1332 self._date = util.parsedate(date)
1333 if user:
1333 if user:
1334 self._user = user
1334 self._user = user
1335 if changes:
1335 if changes:
1336 self._status = changes
1336 self._status = changes
1337
1337
1338 self._extra = {}
1338 self._extra = {}
1339 if extra:
1339 if extra:
1340 self._extra = extra.copy()
1340 self._extra = extra.copy()
1341 if 'branch' not in self._extra:
1341 if 'branch' not in self._extra:
1342 try:
1342 try:
1343 branch = encoding.fromlocal(self._repo.dirstate.branch())
1343 branch = encoding.fromlocal(self._repo.dirstate.branch())
1344 except UnicodeDecodeError:
1344 except UnicodeDecodeError:
1345 raise error.Abort(_('branch name not in UTF-8!'))
1345 raise error.Abort(_('branch name not in UTF-8!'))
1346 self._extra['branch'] = branch
1346 self._extra['branch'] = branch
1347 if self._extra['branch'] == '':
1347 if self._extra['branch'] == '':
1348 self._extra['branch'] = 'default'
1348 self._extra['branch'] = 'default'
1349
1349
1350 def __bytes__(self):
1350 def __bytes__(self):
1351 return bytes(self._parents[0]) + "+"
1351 return bytes(self._parents[0]) + "+"
1352
1352
1353 __str__ = encoding.strmethod(__bytes__)
1353 __str__ = encoding.strmethod(__bytes__)
1354
1354
1355 def __nonzero__(self):
1355 def __nonzero__(self):
1356 return True
1356 return True
1357
1357
1358 __bool__ = __nonzero__
1358 __bool__ = __nonzero__
1359
1359
1360 def _buildflagfunc(self):
1360 def _buildflagfunc(self):
1361 # Create a fallback function for getting file flags when the
1361 # Create a fallback function for getting file flags when the
1362 # filesystem doesn't support them
1362 # filesystem doesn't support them
1363
1363
1364 copiesget = self._repo.dirstate.copies().get
1364 copiesget = self._repo.dirstate.copies().get
1365 parents = self.parents()
1365 parents = self.parents()
1366 if len(parents) < 2:
1366 if len(parents) < 2:
1367 # when we have one parent, it's easy: copy from parent
1367 # when we have one parent, it's easy: copy from parent
1368 man = parents[0].manifest()
1368 man = parents[0].manifest()
1369 def func(f):
1369 def func(f):
1370 f = copiesget(f, f)
1370 f = copiesget(f, f)
1371 return man.flags(f)
1371 return man.flags(f)
1372 else:
1372 else:
1373 # merges are tricky: we try to reconstruct the unstored
1373 # merges are tricky: we try to reconstruct the unstored
1374 # result from the merge (issue1802)
1374 # result from the merge (issue1802)
1375 p1, p2 = parents
1375 p1, p2 = parents
1376 pa = p1.ancestor(p2)
1376 pa = p1.ancestor(p2)
1377 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1377 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1378
1378
1379 def func(f):
1379 def func(f):
1380 f = copiesget(f, f) # may be wrong for merges with copies
1380 f = copiesget(f, f) # may be wrong for merges with copies
1381 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1381 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1382 if fl1 == fl2:
1382 if fl1 == fl2:
1383 return fl1
1383 return fl1
1384 if fl1 == fla:
1384 if fl1 == fla:
1385 return fl2
1385 return fl2
1386 if fl2 == fla:
1386 if fl2 == fla:
1387 return fl1
1387 return fl1
1388 return '' # punt for conflicts
1388 return '' # punt for conflicts
1389
1389
1390 return func
1390 return func
1391
1391
1392 @propertycache
1392 @propertycache
1393 def _flagfunc(self):
1393 def _flagfunc(self):
1394 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1394 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1395
1395
1396 @propertycache
1396 @propertycache
1397 def _status(self):
1397 def _status(self):
1398 return self._repo.status()
1398 return self._repo.status()
1399
1399
1400 @propertycache
1400 @propertycache
1401 def _user(self):
1401 def _user(self):
1402 return self._repo.ui.username()
1402 return self._repo.ui.username()
1403
1403
1404 @propertycache
1404 @propertycache
1405 def _date(self):
1405 def _date(self):
1406 ui = self._repo.ui
1406 ui = self._repo.ui
1407 date = ui.configdate('devel', 'default-date')
1407 date = ui.configdate('devel', 'default-date')
1408 if date is None:
1408 if date is None:
1409 date = util.makedate()
1409 date = util.makedate()
1410 return date
1410 return date
1411
1411
1412 def subrev(self, subpath):
1412 def subrev(self, subpath):
1413 return None
1413 return None
1414
1414
1415 def manifestnode(self):
1415 def manifestnode(self):
1416 return None
1416 return None
1417 def user(self):
1417 def user(self):
1418 return self._user or self._repo.ui.username()
1418 return self._user or self._repo.ui.username()
1419 def date(self):
1419 def date(self):
1420 return self._date
1420 return self._date
1421 def description(self):
1421 def description(self):
1422 return self._text
1422 return self._text
1423 def files(self):
1423 def files(self):
1424 return sorted(self._status.modified + self._status.added +
1424 return sorted(self._status.modified + self._status.added +
1425 self._status.removed)
1425 self._status.removed)
1426
1426
1427 def modified(self):
1427 def modified(self):
1428 return self._status.modified
1428 return self._status.modified
1429 def added(self):
1429 def added(self):
1430 return self._status.added
1430 return self._status.added
1431 def removed(self):
1431 def removed(self):
1432 return self._status.removed
1432 return self._status.removed
1433 def deleted(self):
1433 def deleted(self):
1434 return self._status.deleted
1434 return self._status.deleted
1435 def branch(self):
1435 def branch(self):
1436 return encoding.tolocal(self._extra['branch'])
1436 return encoding.tolocal(self._extra['branch'])
1437 def closesbranch(self):
1437 def closesbranch(self):
1438 return 'close' in self._extra
1438 return 'close' in self._extra
1439 def extra(self):
1439 def extra(self):
1440 return self._extra
1440 return self._extra
1441
1441
1442 def tags(self):
1442 def tags(self):
1443 return []
1443 return []
1444
1444
1445 def bookmarks(self):
1445 def bookmarks(self):
1446 b = []
1446 b = []
1447 for p in self.parents():
1447 for p in self.parents():
1448 b.extend(p.bookmarks())
1448 b.extend(p.bookmarks())
1449 return b
1449 return b
1450
1450
1451 def phase(self):
1451 def phase(self):
1452 phase = phases.draft # default phase to draft
1452 phase = phases.draft # default phase to draft
1453 for p in self.parents():
1453 for p in self.parents():
1454 phase = max(phase, p.phase())
1454 phase = max(phase, p.phase())
1455 return phase
1455 return phase
1456
1456
1457 def hidden(self):
1457 def hidden(self):
1458 return False
1458 return False
1459
1459
1460 def children(self):
1460 def children(self):
1461 return []
1461 return []
1462
1462
1463 def flags(self, path):
1463 def flags(self, path):
1464 if r'_manifest' in self.__dict__:
1464 if r'_manifest' in self.__dict__:
1465 try:
1465 try:
1466 return self._manifest.flags(path)
1466 return self._manifest.flags(path)
1467 except KeyError:
1467 except KeyError:
1468 return ''
1468 return ''
1469
1469
1470 try:
1470 try:
1471 return self._flagfunc(path)
1471 return self._flagfunc(path)
1472 except OSError:
1472 except OSError:
1473 return ''
1473 return ''
1474
1474
1475 def ancestor(self, c2):
1475 def ancestor(self, c2):
1476 """return the "best" ancestor context of self and c2"""
1476 """return the "best" ancestor context of self and c2"""
1477 return self._parents[0].ancestor(c2) # punt on two parents for now
1477 return self._parents[0].ancestor(c2) # punt on two parents for now
1478
1478
1479 def walk(self, match):
1479 def walk(self, match):
1480 '''Generates matching file names.'''
1480 '''Generates matching file names.'''
1481 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1481 return sorted(self._repo.dirstate.walk(match,
1482 True, False))
1482 subrepos=sorted(self.substate),
1483 unknown=True, ignored=False))
1483
1484
1484 def matches(self, match):
1485 def matches(self, match):
1485 return sorted(self._repo.dirstate.matches(match))
1486 return sorted(self._repo.dirstate.matches(match))
1486
1487
1487 def ancestors(self):
1488 def ancestors(self):
1488 for p in self._parents:
1489 for p in self._parents:
1489 yield p
1490 yield p
1490 for a in self._repo.changelog.ancestors(
1491 for a in self._repo.changelog.ancestors(
1491 [p.rev() for p in self._parents]):
1492 [p.rev() for p in self._parents]):
1492 yield changectx(self._repo, a)
1493 yield changectx(self._repo, a)
1493
1494
1494 def markcommitted(self, node):
1495 def markcommitted(self, node):
1495 """Perform post-commit cleanup necessary after committing this ctx
1496 """Perform post-commit cleanup necessary after committing this ctx
1496
1497
1497 Specifically, this updates backing stores this working context
1498 Specifically, this updates backing stores this working context
1498 wraps to reflect the fact that the changes reflected by this
1499 wraps to reflect the fact that the changes reflected by this
1499 workingctx have been committed. For example, it marks
1500 workingctx have been committed. For example, it marks
1500 modified and added files as normal in the dirstate.
1501 modified and added files as normal in the dirstate.
1501
1502
1502 """
1503 """
1503
1504
1504 with self._repo.dirstate.parentchange():
1505 with self._repo.dirstate.parentchange():
1505 for f in self.modified() + self.added():
1506 for f in self.modified() + self.added():
1506 self._repo.dirstate.normal(f)
1507 self._repo.dirstate.normal(f)
1507 for f in self.removed():
1508 for f in self.removed():
1508 self._repo.dirstate.drop(f)
1509 self._repo.dirstate.drop(f)
1509 self._repo.dirstate.setparents(node)
1510 self._repo.dirstate.setparents(node)
1510
1511
1511 # write changes out explicitly, because nesting wlock at
1512 # write changes out explicitly, because nesting wlock at
1512 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1513 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1513 # from immediately doing so for subsequent changing files
1514 # from immediately doing so for subsequent changing files
1514 self._repo.dirstate.write(self._repo.currenttransaction())
1515 self._repo.dirstate.write(self._repo.currenttransaction())
1515
1516
1516 def dirty(self, missing=False, merge=True, branch=True):
1517 def dirty(self, missing=False, merge=True, branch=True):
1517 return False
1518 return False
1518
1519
1519 class workingctx(committablectx):
1520 class workingctx(committablectx):
1520 """A workingctx object makes access to data related to
1521 """A workingctx object makes access to data related to
1521 the current working directory convenient.
1522 the current working directory convenient.
1522 date - any valid date string or (unixtime, offset), or None.
1523 date - any valid date string or (unixtime, offset), or None.
1523 user - username string, or None.
1524 user - username string, or None.
1524 extra - a dictionary of extra values, or None.
1525 extra - a dictionary of extra values, or None.
1525 changes - a list of file lists as returned by localrepo.status()
1526 changes - a list of file lists as returned by localrepo.status()
1526 or None to use the repository status.
1527 or None to use the repository status.
1527 """
1528 """
1528 def __init__(self, repo, text="", user=None, date=None, extra=None,
1529 def __init__(self, repo, text="", user=None, date=None, extra=None,
1529 changes=None):
1530 changes=None):
1530 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1531 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1531
1532
1532 def __iter__(self):
1533 def __iter__(self):
1533 d = self._repo.dirstate
1534 d = self._repo.dirstate
1534 for f in d:
1535 for f in d:
1535 if d[f] != 'r':
1536 if d[f] != 'r':
1536 yield f
1537 yield f
1537
1538
1538 def __contains__(self, key):
1539 def __contains__(self, key):
1539 return self._repo.dirstate[key] not in "?r"
1540 return self._repo.dirstate[key] not in "?r"
1540
1541
1541 def hex(self):
1542 def hex(self):
1542 return hex(wdirid)
1543 return hex(wdirid)
1543
1544
1544 @propertycache
1545 @propertycache
1545 def _parents(self):
1546 def _parents(self):
1546 p = self._repo.dirstate.parents()
1547 p = self._repo.dirstate.parents()
1547 if p[1] == nullid:
1548 if p[1] == nullid:
1548 p = p[:-1]
1549 p = p[:-1]
1549 return [changectx(self._repo, x) for x in p]
1550 return [changectx(self._repo, x) for x in p]
1550
1551
1551 def filectx(self, path, filelog=None):
1552 def filectx(self, path, filelog=None):
1552 """get a file context from the working directory"""
1553 """get a file context from the working directory"""
1553 return workingfilectx(self._repo, path, workingctx=self,
1554 return workingfilectx(self._repo, path, workingctx=self,
1554 filelog=filelog)
1555 filelog=filelog)
1555
1556
1556 def dirty(self, missing=False, merge=True, branch=True):
1557 def dirty(self, missing=False, merge=True, branch=True):
1557 "check whether a working directory is modified"
1558 "check whether a working directory is modified"
1558 # check subrepos first
1559 # check subrepos first
1559 for s in sorted(self.substate):
1560 for s in sorted(self.substate):
1560 if self.sub(s).dirty(missing=missing):
1561 if self.sub(s).dirty(missing=missing):
1561 return True
1562 return True
1562 # check current working dir
1563 # check current working dir
1563 return ((merge and self.p2()) or
1564 return ((merge and self.p2()) or
1564 (branch and self.branch() != self.p1().branch()) or
1565 (branch and self.branch() != self.p1().branch()) or
1565 self.modified() or self.added() or self.removed() or
1566 self.modified() or self.added() or self.removed() or
1566 (missing and self.deleted()))
1567 (missing and self.deleted()))
1567
1568
1568 def add(self, list, prefix=""):
1569 def add(self, list, prefix=""):
1569 with self._repo.wlock():
1570 with self._repo.wlock():
1570 ui, ds = self._repo.ui, self._repo.dirstate
1571 ui, ds = self._repo.ui, self._repo.dirstate
1571 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1572 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1572 rejected = []
1573 rejected = []
1573 lstat = self._repo.wvfs.lstat
1574 lstat = self._repo.wvfs.lstat
1574 for f in list:
1575 for f in list:
1575 # ds.pathto() returns an absolute file when this is invoked from
1576 # ds.pathto() returns an absolute file when this is invoked from
1576 # the keyword extension. That gets flagged as non-portable on
1577 # the keyword extension. That gets flagged as non-portable on
1577 # Windows, since it contains the drive letter and colon.
1578 # Windows, since it contains the drive letter and colon.
1578 scmutil.checkportable(ui, os.path.join(prefix, f))
1579 scmutil.checkportable(ui, os.path.join(prefix, f))
1579 try:
1580 try:
1580 st = lstat(f)
1581 st = lstat(f)
1581 except OSError:
1582 except OSError:
1582 ui.warn(_("%s does not exist!\n") % uipath(f))
1583 ui.warn(_("%s does not exist!\n") % uipath(f))
1583 rejected.append(f)
1584 rejected.append(f)
1584 continue
1585 continue
1585 if st.st_size > 10000000:
1586 if st.st_size > 10000000:
1586 ui.warn(_("%s: up to %d MB of RAM may be required "
1587 ui.warn(_("%s: up to %d MB of RAM may be required "
1587 "to manage this file\n"
1588 "to manage this file\n"
1588 "(use 'hg revert %s' to cancel the "
1589 "(use 'hg revert %s' to cancel the "
1589 "pending addition)\n")
1590 "pending addition)\n")
1590 % (f, 3 * st.st_size // 1000000, uipath(f)))
1591 % (f, 3 * st.st_size // 1000000, uipath(f)))
1591 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1592 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1592 ui.warn(_("%s not added: only files and symlinks "
1593 ui.warn(_("%s not added: only files and symlinks "
1593 "supported currently\n") % uipath(f))
1594 "supported currently\n") % uipath(f))
1594 rejected.append(f)
1595 rejected.append(f)
1595 elif ds[f] in 'amn':
1596 elif ds[f] in 'amn':
1596 ui.warn(_("%s already tracked!\n") % uipath(f))
1597 ui.warn(_("%s already tracked!\n") % uipath(f))
1597 elif ds[f] == 'r':
1598 elif ds[f] == 'r':
1598 ds.normallookup(f)
1599 ds.normallookup(f)
1599 else:
1600 else:
1600 ds.add(f)
1601 ds.add(f)
1601 return rejected
1602 return rejected
1602
1603
1603 def forget(self, files, prefix=""):
1604 def forget(self, files, prefix=""):
1604 with self._repo.wlock():
1605 with self._repo.wlock():
1605 ds = self._repo.dirstate
1606 ds = self._repo.dirstate
1606 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1607 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1607 rejected = []
1608 rejected = []
1608 for f in files:
1609 for f in files:
1609 if f not in self._repo.dirstate:
1610 if f not in self._repo.dirstate:
1610 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1611 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1611 rejected.append(f)
1612 rejected.append(f)
1612 elif self._repo.dirstate[f] != 'a':
1613 elif self._repo.dirstate[f] != 'a':
1613 self._repo.dirstate.remove(f)
1614 self._repo.dirstate.remove(f)
1614 else:
1615 else:
1615 self._repo.dirstate.drop(f)
1616 self._repo.dirstate.drop(f)
1616 return rejected
1617 return rejected
1617
1618
1618 def undelete(self, list):
1619 def undelete(self, list):
1619 pctxs = self.parents()
1620 pctxs = self.parents()
1620 with self._repo.wlock():
1621 with self._repo.wlock():
1621 ds = self._repo.dirstate
1622 ds = self._repo.dirstate
1622 for f in list:
1623 for f in list:
1623 if self._repo.dirstate[f] != 'r':
1624 if self._repo.dirstate[f] != 'r':
1624 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1625 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1625 else:
1626 else:
1626 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1627 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1627 t = fctx.data()
1628 t = fctx.data()
1628 self._repo.wwrite(f, t, fctx.flags())
1629 self._repo.wwrite(f, t, fctx.flags())
1629 self._repo.dirstate.normal(f)
1630 self._repo.dirstate.normal(f)
1630
1631
1631 def copy(self, source, dest):
1632 def copy(self, source, dest):
1632 try:
1633 try:
1633 st = self._repo.wvfs.lstat(dest)
1634 st = self._repo.wvfs.lstat(dest)
1634 except OSError as err:
1635 except OSError as err:
1635 if err.errno != errno.ENOENT:
1636 if err.errno != errno.ENOENT:
1636 raise
1637 raise
1637 self._repo.ui.warn(_("%s does not exist!\n")
1638 self._repo.ui.warn(_("%s does not exist!\n")
1638 % self._repo.dirstate.pathto(dest))
1639 % self._repo.dirstate.pathto(dest))
1639 return
1640 return
1640 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1641 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1641 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1642 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1642 "symbolic link\n")
1643 "symbolic link\n")
1643 % self._repo.dirstate.pathto(dest))
1644 % self._repo.dirstate.pathto(dest))
1644 else:
1645 else:
1645 with self._repo.wlock():
1646 with self._repo.wlock():
1646 if self._repo.dirstate[dest] in '?':
1647 if self._repo.dirstate[dest] in '?':
1647 self._repo.dirstate.add(dest)
1648 self._repo.dirstate.add(dest)
1648 elif self._repo.dirstate[dest] in 'r':
1649 elif self._repo.dirstate[dest] in 'r':
1649 self._repo.dirstate.normallookup(dest)
1650 self._repo.dirstate.normallookup(dest)
1650 self._repo.dirstate.copy(source, dest)
1651 self._repo.dirstate.copy(source, dest)
1651
1652
1652 def match(self, pats=None, include=None, exclude=None, default='glob',
1653 def match(self, pats=None, include=None, exclude=None, default='glob',
1653 listsubrepos=False, badfn=None):
1654 listsubrepos=False, badfn=None):
1654 r = self._repo
1655 r = self._repo
1655
1656
1656 # Only a case insensitive filesystem needs magic to translate user input
1657 # Only a case insensitive filesystem needs magic to translate user input
1657 # to actual case in the filesystem.
1658 # to actual case in the filesystem.
1658 icasefs = not util.fscasesensitive(r.root)
1659 icasefs = not util.fscasesensitive(r.root)
1659 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1660 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1660 default, auditor=r.auditor, ctx=self,
1661 default, auditor=r.auditor, ctx=self,
1661 listsubrepos=listsubrepos, badfn=badfn,
1662 listsubrepos=listsubrepos, badfn=badfn,
1662 icasefs=icasefs)
1663 icasefs=icasefs)
1663
1664
1664 def flushall(self):
1665 def flushall(self):
1665 pass # For overlayworkingfilectx compatibility.
1666 pass # For overlayworkingfilectx compatibility.
1666
1667
1667 def _filtersuspectsymlink(self, files):
1668 def _filtersuspectsymlink(self, files):
1668 if not files or self._repo.dirstate._checklink:
1669 if not files or self._repo.dirstate._checklink:
1669 return files
1670 return files
1670
1671
1671 # Symlink placeholders may get non-symlink-like contents
1672 # Symlink placeholders may get non-symlink-like contents
1672 # via user error or dereferencing by NFS or Samba servers,
1673 # via user error or dereferencing by NFS or Samba servers,
1673 # so we filter out any placeholders that don't look like a
1674 # so we filter out any placeholders that don't look like a
1674 # symlink
1675 # symlink
1675 sane = []
1676 sane = []
1676 for f in files:
1677 for f in files:
1677 if self.flags(f) == 'l':
1678 if self.flags(f) == 'l':
1678 d = self[f].data()
1679 d = self[f].data()
1679 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1680 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1680 self._repo.ui.debug('ignoring suspect symlink placeholder'
1681 self._repo.ui.debug('ignoring suspect symlink placeholder'
1681 ' "%s"\n' % f)
1682 ' "%s"\n' % f)
1682 continue
1683 continue
1683 sane.append(f)
1684 sane.append(f)
1684 return sane
1685 return sane
1685
1686
1686 def _checklookup(self, files):
1687 def _checklookup(self, files):
1687 # check for any possibly clean files
1688 # check for any possibly clean files
1688 if not files:
1689 if not files:
1689 return [], [], []
1690 return [], [], []
1690
1691
1691 modified = []
1692 modified = []
1692 deleted = []
1693 deleted = []
1693 fixup = []
1694 fixup = []
1694 pctx = self._parents[0]
1695 pctx = self._parents[0]
1695 # do a full compare of any files that might have changed
1696 # do a full compare of any files that might have changed
1696 for f in sorted(files):
1697 for f in sorted(files):
1697 try:
1698 try:
1698 # This will return True for a file that got replaced by a
1699 # This will return True for a file that got replaced by a
1699 # directory in the interim, but fixing that is pretty hard.
1700 # directory in the interim, but fixing that is pretty hard.
1700 if (f not in pctx or self.flags(f) != pctx.flags(f)
1701 if (f not in pctx or self.flags(f) != pctx.flags(f)
1701 or pctx[f].cmp(self[f])):
1702 or pctx[f].cmp(self[f])):
1702 modified.append(f)
1703 modified.append(f)
1703 else:
1704 else:
1704 fixup.append(f)
1705 fixup.append(f)
1705 except (IOError, OSError):
1706 except (IOError, OSError):
1706 # A file become inaccessible in between? Mark it as deleted,
1707 # A file become inaccessible in between? Mark it as deleted,
1707 # matching dirstate behavior (issue5584).
1708 # matching dirstate behavior (issue5584).
1708 # The dirstate has more complex behavior around whether a
1709 # The dirstate has more complex behavior around whether a
1709 # missing file matches a directory, etc, but we don't need to
1710 # missing file matches a directory, etc, but we don't need to
1710 # bother with that: if f has made it to this point, we're sure
1711 # bother with that: if f has made it to this point, we're sure
1711 # it's in the dirstate.
1712 # it's in the dirstate.
1712 deleted.append(f)
1713 deleted.append(f)
1713
1714
1714 return modified, deleted, fixup
1715 return modified, deleted, fixup
1715
1716
1716 def _poststatusfixup(self, status, fixup):
1717 def _poststatusfixup(self, status, fixup):
1717 """update dirstate for files that are actually clean"""
1718 """update dirstate for files that are actually clean"""
1718 poststatus = self._repo.postdsstatus()
1719 poststatus = self._repo.postdsstatus()
1719 if fixup or poststatus:
1720 if fixup or poststatus:
1720 try:
1721 try:
1721 oldid = self._repo.dirstate.identity()
1722 oldid = self._repo.dirstate.identity()
1722
1723
1723 # updating the dirstate is optional
1724 # updating the dirstate is optional
1724 # so we don't wait on the lock
1725 # so we don't wait on the lock
1725 # wlock can invalidate the dirstate, so cache normal _after_
1726 # wlock can invalidate the dirstate, so cache normal _after_
1726 # taking the lock
1727 # taking the lock
1727 with self._repo.wlock(False):
1728 with self._repo.wlock(False):
1728 if self._repo.dirstate.identity() == oldid:
1729 if self._repo.dirstate.identity() == oldid:
1729 if fixup:
1730 if fixup:
1730 normal = self._repo.dirstate.normal
1731 normal = self._repo.dirstate.normal
1731 for f in fixup:
1732 for f in fixup:
1732 normal(f)
1733 normal(f)
1733 # write changes out explicitly, because nesting
1734 # write changes out explicitly, because nesting
1734 # wlock at runtime may prevent 'wlock.release()'
1735 # wlock at runtime may prevent 'wlock.release()'
1735 # after this block from doing so for subsequent
1736 # after this block from doing so for subsequent
1736 # changing files
1737 # changing files
1737 tr = self._repo.currenttransaction()
1738 tr = self._repo.currenttransaction()
1738 self._repo.dirstate.write(tr)
1739 self._repo.dirstate.write(tr)
1739
1740
1740 if poststatus:
1741 if poststatus:
1741 for ps in poststatus:
1742 for ps in poststatus:
1742 ps(self, status)
1743 ps(self, status)
1743 else:
1744 else:
1744 # in this case, writing changes out breaks
1745 # in this case, writing changes out breaks
1745 # consistency, because .hg/dirstate was
1746 # consistency, because .hg/dirstate was
1746 # already changed simultaneously after last
1747 # already changed simultaneously after last
1747 # caching (see also issue5584 for detail)
1748 # caching (see also issue5584 for detail)
1748 self._repo.ui.debug('skip updating dirstate: '
1749 self._repo.ui.debug('skip updating dirstate: '
1749 'identity mismatch\n')
1750 'identity mismatch\n')
1750 except error.LockError:
1751 except error.LockError:
1751 pass
1752 pass
1752 finally:
1753 finally:
1753 # Even if the wlock couldn't be grabbed, clear out the list.
1754 # Even if the wlock couldn't be grabbed, clear out the list.
1754 self._repo.clearpostdsstatus()
1755 self._repo.clearpostdsstatus()
1755
1756
1756 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1757 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1757 '''Gets the status from the dirstate -- internal use only.'''
1758 '''Gets the status from the dirstate -- internal use only.'''
1758 listignored, listclean, listunknown = ignored, clean, unknown
1759 listignored, listclean, listunknown = ignored, clean, unknown
1759 subrepos = []
1760 subrepos = []
1760 if '.hgsub' in self:
1761 if '.hgsub' in self:
1761 subrepos = sorted(self.substate)
1762 subrepos = sorted(self.substate)
1762 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1763 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1763 listclean, listunknown)
1764 listclean, listunknown)
1764
1765
1765 # check for any possibly clean files
1766 # check for any possibly clean files
1766 fixup = []
1767 fixup = []
1767 if cmp:
1768 if cmp:
1768 modified2, deleted2, fixup = self._checklookup(cmp)
1769 modified2, deleted2, fixup = self._checklookup(cmp)
1769 s.modified.extend(modified2)
1770 s.modified.extend(modified2)
1770 s.deleted.extend(deleted2)
1771 s.deleted.extend(deleted2)
1771
1772
1772 if fixup and listclean:
1773 if fixup and listclean:
1773 s.clean.extend(fixup)
1774 s.clean.extend(fixup)
1774
1775
1775 self._poststatusfixup(s, fixup)
1776 self._poststatusfixup(s, fixup)
1776
1777
1777 if match.always():
1778 if match.always():
1778 # cache for performance
1779 # cache for performance
1779 if s.unknown or s.ignored or s.clean:
1780 if s.unknown or s.ignored or s.clean:
1780 # "_status" is cached with list*=False in the normal route
1781 # "_status" is cached with list*=False in the normal route
1781 self._status = scmutil.status(s.modified, s.added, s.removed,
1782 self._status = scmutil.status(s.modified, s.added, s.removed,
1782 s.deleted, [], [], [])
1783 s.deleted, [], [], [])
1783 else:
1784 else:
1784 self._status = s
1785 self._status = s
1785
1786
1786 return s
1787 return s
1787
1788
1788 @propertycache
1789 @propertycache
1789 def _manifest(self):
1790 def _manifest(self):
1790 """generate a manifest corresponding to the values in self._status
1791 """generate a manifest corresponding to the values in self._status
1791
1792
1792 This reuse the file nodeid from parent, but we use special node
1793 This reuse the file nodeid from parent, but we use special node
1793 identifiers for added and modified files. This is used by manifests
1794 identifiers for added and modified files. This is used by manifests
1794 merge to see that files are different and by update logic to avoid
1795 merge to see that files are different and by update logic to avoid
1795 deleting newly added files.
1796 deleting newly added files.
1796 """
1797 """
1797 return self._buildstatusmanifest(self._status)
1798 return self._buildstatusmanifest(self._status)
1798
1799
1799 def _buildstatusmanifest(self, status):
1800 def _buildstatusmanifest(self, status):
1800 """Builds a manifest that includes the given status results."""
1801 """Builds a manifest that includes the given status results."""
1801 parents = self.parents()
1802 parents = self.parents()
1802
1803
1803 man = parents[0].manifest().copy()
1804 man = parents[0].manifest().copy()
1804
1805
1805 ff = self._flagfunc
1806 ff = self._flagfunc
1806 for i, l in ((addednodeid, status.added),
1807 for i, l in ((addednodeid, status.added),
1807 (modifiednodeid, status.modified)):
1808 (modifiednodeid, status.modified)):
1808 for f in l:
1809 for f in l:
1809 man[f] = i
1810 man[f] = i
1810 try:
1811 try:
1811 man.setflag(f, ff(f))
1812 man.setflag(f, ff(f))
1812 except OSError:
1813 except OSError:
1813 pass
1814 pass
1814
1815
1815 for f in status.deleted + status.removed:
1816 for f in status.deleted + status.removed:
1816 if f in man:
1817 if f in man:
1817 del man[f]
1818 del man[f]
1818
1819
1819 return man
1820 return man
1820
1821
1821 def _buildstatus(self, other, s, match, listignored, listclean,
1822 def _buildstatus(self, other, s, match, listignored, listclean,
1822 listunknown):
1823 listunknown):
1823 """build a status with respect to another context
1824 """build a status with respect to another context
1824
1825
1825 This includes logic for maintaining the fast path of status when
1826 This includes logic for maintaining the fast path of status when
1826 comparing the working directory against its parent, which is to skip
1827 comparing the working directory against its parent, which is to skip
1827 building a new manifest if self (working directory) is not comparing
1828 building a new manifest if self (working directory) is not comparing
1828 against its parent (repo['.']).
1829 against its parent (repo['.']).
1829 """
1830 """
1830 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1831 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1831 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1832 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1832 # might have accidentally ended up with the entire contents of the file
1833 # might have accidentally ended up with the entire contents of the file
1833 # they are supposed to be linking to.
1834 # they are supposed to be linking to.
1834 s.modified[:] = self._filtersuspectsymlink(s.modified)
1835 s.modified[:] = self._filtersuspectsymlink(s.modified)
1835 if other != self._repo['.']:
1836 if other != self._repo['.']:
1836 s = super(workingctx, self)._buildstatus(other, s, match,
1837 s = super(workingctx, self)._buildstatus(other, s, match,
1837 listignored, listclean,
1838 listignored, listclean,
1838 listunknown)
1839 listunknown)
1839 return s
1840 return s
1840
1841
1841 def _matchstatus(self, other, match):
1842 def _matchstatus(self, other, match):
1842 """override the match method with a filter for directory patterns
1843 """override the match method with a filter for directory patterns
1843
1844
1844 We use inheritance to customize the match.bad method only in cases of
1845 We use inheritance to customize the match.bad method only in cases of
1845 workingctx since it belongs only to the working directory when
1846 workingctx since it belongs only to the working directory when
1846 comparing against the parent changeset.
1847 comparing against the parent changeset.
1847
1848
1848 If we aren't comparing against the working directory's parent, then we
1849 If we aren't comparing against the working directory's parent, then we
1849 just use the default match object sent to us.
1850 just use the default match object sent to us.
1850 """
1851 """
1851 if other != self._repo['.']:
1852 if other != self._repo['.']:
1852 def bad(f, msg):
1853 def bad(f, msg):
1853 # 'f' may be a directory pattern from 'match.files()',
1854 # 'f' may be a directory pattern from 'match.files()',
1854 # so 'f not in ctx1' is not enough
1855 # so 'f not in ctx1' is not enough
1855 if f not in other and not other.hasdir(f):
1856 if f not in other and not other.hasdir(f):
1856 self._repo.ui.warn('%s: %s\n' %
1857 self._repo.ui.warn('%s: %s\n' %
1857 (self._repo.dirstate.pathto(f), msg))
1858 (self._repo.dirstate.pathto(f), msg))
1858 match.bad = bad
1859 match.bad = bad
1859 return match
1860 return match
1860
1861
1861 def markcommitted(self, node):
1862 def markcommitted(self, node):
1862 super(workingctx, self).markcommitted(node)
1863 super(workingctx, self).markcommitted(node)
1863
1864
1864 sparse.aftercommit(self._repo, node)
1865 sparse.aftercommit(self._repo, node)
1865
1866
1866 class committablefilectx(basefilectx):
1867 class committablefilectx(basefilectx):
1867 """A committablefilectx provides common functionality for a file context
1868 """A committablefilectx provides common functionality for a file context
1868 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1869 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1869 def __init__(self, repo, path, filelog=None, ctx=None):
1870 def __init__(self, repo, path, filelog=None, ctx=None):
1870 self._repo = repo
1871 self._repo = repo
1871 self._path = path
1872 self._path = path
1872 self._changeid = None
1873 self._changeid = None
1873 self._filerev = self._filenode = None
1874 self._filerev = self._filenode = None
1874
1875
1875 if filelog is not None:
1876 if filelog is not None:
1876 self._filelog = filelog
1877 self._filelog = filelog
1877 if ctx:
1878 if ctx:
1878 self._changectx = ctx
1879 self._changectx = ctx
1879
1880
1880 def __nonzero__(self):
1881 def __nonzero__(self):
1881 return True
1882 return True
1882
1883
1883 __bool__ = __nonzero__
1884 __bool__ = __nonzero__
1884
1885
1885 def linkrev(self):
1886 def linkrev(self):
1886 # linked to self._changectx no matter if file is modified or not
1887 # linked to self._changectx no matter if file is modified or not
1887 return self.rev()
1888 return self.rev()
1888
1889
1889 def parents(self):
1890 def parents(self):
1890 '''return parent filectxs, following copies if necessary'''
1891 '''return parent filectxs, following copies if necessary'''
1891 def filenode(ctx, path):
1892 def filenode(ctx, path):
1892 return ctx._manifest.get(path, nullid)
1893 return ctx._manifest.get(path, nullid)
1893
1894
1894 path = self._path
1895 path = self._path
1895 fl = self._filelog
1896 fl = self._filelog
1896 pcl = self._changectx._parents
1897 pcl = self._changectx._parents
1897 renamed = self.renamed()
1898 renamed = self.renamed()
1898
1899
1899 if renamed:
1900 if renamed:
1900 pl = [renamed + (None,)]
1901 pl = [renamed + (None,)]
1901 else:
1902 else:
1902 pl = [(path, filenode(pcl[0], path), fl)]
1903 pl = [(path, filenode(pcl[0], path), fl)]
1903
1904
1904 for pc in pcl[1:]:
1905 for pc in pcl[1:]:
1905 pl.append((path, filenode(pc, path), fl))
1906 pl.append((path, filenode(pc, path), fl))
1906
1907
1907 return [self._parentfilectx(p, fileid=n, filelog=l)
1908 return [self._parentfilectx(p, fileid=n, filelog=l)
1908 for p, n, l in pl if n != nullid]
1909 for p, n, l in pl if n != nullid]
1909
1910
1910 def children(self):
1911 def children(self):
1911 return []
1912 return []
1912
1913
1913 class workingfilectx(committablefilectx):
1914 class workingfilectx(committablefilectx):
1914 """A workingfilectx object makes access to data related to a particular
1915 """A workingfilectx object makes access to data related to a particular
1915 file in the working directory convenient."""
1916 file in the working directory convenient."""
1916 def __init__(self, repo, path, filelog=None, workingctx=None):
1917 def __init__(self, repo, path, filelog=None, workingctx=None):
1917 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1918 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1918
1919
1919 @propertycache
1920 @propertycache
1920 def _changectx(self):
1921 def _changectx(self):
1921 return workingctx(self._repo)
1922 return workingctx(self._repo)
1922
1923
1923 def data(self):
1924 def data(self):
1924 return self._repo.wread(self._path)
1925 return self._repo.wread(self._path)
1925 def renamed(self):
1926 def renamed(self):
1926 rp = self._repo.dirstate.copied(self._path)
1927 rp = self._repo.dirstate.copied(self._path)
1927 if not rp:
1928 if not rp:
1928 return None
1929 return None
1929 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1930 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1930
1931
1931 def size(self):
1932 def size(self):
1932 return self._repo.wvfs.lstat(self._path).st_size
1933 return self._repo.wvfs.lstat(self._path).st_size
1933 def date(self):
1934 def date(self):
1934 t, tz = self._changectx.date()
1935 t, tz = self._changectx.date()
1935 try:
1936 try:
1936 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1937 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1937 except OSError as err:
1938 except OSError as err:
1938 if err.errno != errno.ENOENT:
1939 if err.errno != errno.ENOENT:
1939 raise
1940 raise
1940 return (t, tz)
1941 return (t, tz)
1941
1942
1942 def exists(self):
1943 def exists(self):
1943 return self._repo.wvfs.exists(self._path)
1944 return self._repo.wvfs.exists(self._path)
1944
1945
1945 def lexists(self):
1946 def lexists(self):
1946 return self._repo.wvfs.lexists(self._path)
1947 return self._repo.wvfs.lexists(self._path)
1947
1948
1948 def audit(self):
1949 def audit(self):
1949 return self._repo.wvfs.audit(self._path)
1950 return self._repo.wvfs.audit(self._path)
1950
1951
1951 def cmp(self, fctx):
1952 def cmp(self, fctx):
1952 """compare with other file context
1953 """compare with other file context
1953
1954
1954 returns True if different than fctx.
1955 returns True if different than fctx.
1955 """
1956 """
1956 # fctx should be a filectx (not a workingfilectx)
1957 # fctx should be a filectx (not a workingfilectx)
1957 # invert comparison to reuse the same code path
1958 # invert comparison to reuse the same code path
1958 return fctx.cmp(self)
1959 return fctx.cmp(self)
1959
1960
1960 def remove(self, ignoremissing=False):
1961 def remove(self, ignoremissing=False):
1961 """wraps unlink for a repo's working directory"""
1962 """wraps unlink for a repo's working directory"""
1962 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1963 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1963
1964
1964 def write(self, data, flags, backgroundclose=False):
1965 def write(self, data, flags, backgroundclose=False):
1965 """wraps repo.wwrite"""
1966 """wraps repo.wwrite"""
1966 self._repo.wwrite(self._path, data, flags,
1967 self._repo.wwrite(self._path, data, flags,
1967 backgroundclose=backgroundclose)
1968 backgroundclose=backgroundclose)
1968
1969
1969 def clearunknown(self):
1970 def clearunknown(self):
1970 """Removes conflicting items in the working directory so that
1971 """Removes conflicting items in the working directory so that
1971 ``write()`` can be called successfully.
1972 ``write()`` can be called successfully.
1972 """
1973 """
1973 wvfs = self._repo.wvfs
1974 wvfs = self._repo.wvfs
1974 if wvfs.isdir(self._path) and not wvfs.islink(self._path):
1975 if wvfs.isdir(self._path) and not wvfs.islink(self._path):
1975 wvfs.removedirs(self._path)
1976 wvfs.removedirs(self._path)
1976
1977
1977 def setflags(self, l, x):
1978 def setflags(self, l, x):
1978 self._repo.wvfs.setflags(self._path, l, x)
1979 self._repo.wvfs.setflags(self._path, l, x)
1979
1980
1980 class overlayworkingctx(workingctx):
1981 class overlayworkingctx(workingctx):
1981 """Wraps another mutable context with a write-back cache that can be flushed
1982 """Wraps another mutable context with a write-back cache that can be flushed
1982 at a later time.
1983 at a later time.
1983
1984
1984 self._cache[path] maps to a dict with keys: {
1985 self._cache[path] maps to a dict with keys: {
1985 'exists': bool?
1986 'exists': bool?
1986 'date': date?
1987 'date': date?
1987 'data': str?
1988 'data': str?
1988 'flags': str?
1989 'flags': str?
1989 }
1990 }
1990 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1991 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1991 is `False`, the file was deleted.
1992 is `False`, the file was deleted.
1992 """
1993 """
1993
1994
1994 def __init__(self, repo, wrappedctx):
1995 def __init__(self, repo, wrappedctx):
1995 super(overlayworkingctx, self).__init__(repo)
1996 super(overlayworkingctx, self).__init__(repo)
1996 self._repo = repo
1997 self._repo = repo
1997 self._wrappedctx = wrappedctx
1998 self._wrappedctx = wrappedctx
1998 self._clean()
1999 self._clean()
1999
2000
2000 def data(self, path):
2001 def data(self, path):
2001 if self.isdirty(path):
2002 if self.isdirty(path):
2002 if self._cache[path]['exists']:
2003 if self._cache[path]['exists']:
2003 if self._cache[path]['data']:
2004 if self._cache[path]['data']:
2004 return self._cache[path]['data']
2005 return self._cache[path]['data']
2005 else:
2006 else:
2006 # Must fallback here, too, because we only set flags.
2007 # Must fallback here, too, because we only set flags.
2007 return self._wrappedctx[path].data()
2008 return self._wrappedctx[path].data()
2008 else:
2009 else:
2009 raise error.ProgrammingError("No such file or directory: %s" %
2010 raise error.ProgrammingError("No such file or directory: %s" %
2010 self._path)
2011 self._path)
2011 else:
2012 else:
2012 return self._wrappedctx[path].data()
2013 return self._wrappedctx[path].data()
2013
2014
2014 def filedate(self, path):
2015 def filedate(self, path):
2015 if self.isdirty(path):
2016 if self.isdirty(path):
2016 return self._cache[path]['date']
2017 return self._cache[path]['date']
2017 else:
2018 else:
2018 return self._wrappedctx[path].date()
2019 return self._wrappedctx[path].date()
2019
2020
2020 def flags(self, path):
2021 def flags(self, path):
2021 if self.isdirty(path):
2022 if self.isdirty(path):
2022 if self._cache[path]['exists']:
2023 if self._cache[path]['exists']:
2023 return self._cache[path]['flags']
2024 return self._cache[path]['flags']
2024 else:
2025 else:
2025 raise error.ProgrammingError("No such file or directory: %s" %
2026 raise error.ProgrammingError("No such file or directory: %s" %
2026 self._path)
2027 self._path)
2027 else:
2028 else:
2028 return self._wrappedctx[path].flags()
2029 return self._wrappedctx[path].flags()
2029
2030
2030 def write(self, path, data, flags=''):
2031 def write(self, path, data, flags=''):
2031 if data is None:
2032 if data is None:
2032 raise error.ProgrammingError("data must be non-None")
2033 raise error.ProgrammingError("data must be non-None")
2033 self._markdirty(path, exists=True, data=data, date=util.makedate(),
2034 self._markdirty(path, exists=True, data=data, date=util.makedate(),
2034 flags=flags)
2035 flags=flags)
2035
2036
2036 def setflags(self, path, l, x):
2037 def setflags(self, path, l, x):
2037 self._markdirty(path, exists=True, date=util.makedate(),
2038 self._markdirty(path, exists=True, date=util.makedate(),
2038 flags=(l and 'l' or '') + (x and 'x' or ''))
2039 flags=(l and 'l' or '') + (x and 'x' or ''))
2039
2040
2040 def remove(self, path):
2041 def remove(self, path):
2041 self._markdirty(path, exists=False)
2042 self._markdirty(path, exists=False)
2042
2043
2043 def exists(self, path):
2044 def exists(self, path):
2044 """exists behaves like `lexists`, but needs to follow symlinks and
2045 """exists behaves like `lexists`, but needs to follow symlinks and
2045 return False if they are broken.
2046 return False if they are broken.
2046 """
2047 """
2047 if self.isdirty(path):
2048 if self.isdirty(path):
2048 # If this path exists and is a symlink, "follow" it by calling
2049 # If this path exists and is a symlink, "follow" it by calling
2049 # exists on the destination path.
2050 # exists on the destination path.
2050 if (self._cache[path]['exists'] and
2051 if (self._cache[path]['exists'] and
2051 'l' in self._cache[path]['flags']):
2052 'l' in self._cache[path]['flags']):
2052 return self.exists(self._cache[path]['data'].strip())
2053 return self.exists(self._cache[path]['data'].strip())
2053 else:
2054 else:
2054 return self._cache[path]['exists']
2055 return self._cache[path]['exists']
2055 return self._wrappedctx[path].exists()
2056 return self._wrappedctx[path].exists()
2056
2057
2057 def lexists(self, path):
2058 def lexists(self, path):
2058 """lexists returns True if the path exists"""
2059 """lexists returns True if the path exists"""
2059 if self.isdirty(path):
2060 if self.isdirty(path):
2060 return self._cache[path]['exists']
2061 return self._cache[path]['exists']
2061 return self._wrappedctx[path].lexists()
2062 return self._wrappedctx[path].lexists()
2062
2063
2063 def size(self, path):
2064 def size(self, path):
2064 if self.isdirty(path):
2065 if self.isdirty(path):
2065 if self._cache[path]['exists']:
2066 if self._cache[path]['exists']:
2066 return len(self._cache[path]['data'])
2067 return len(self._cache[path]['data'])
2067 else:
2068 else:
2068 raise error.ProgrammingError("No such file or directory: %s" %
2069 raise error.ProgrammingError("No such file or directory: %s" %
2069 self._path)
2070 self._path)
2070 return self._wrappedctx[path].size()
2071 return self._wrappedctx[path].size()
2071
2072
2072 def flushall(self):
2073 def flushall(self):
2073 for path in self._writeorder:
2074 for path in self._writeorder:
2074 entry = self._cache[path]
2075 entry = self._cache[path]
2075 if entry['exists']:
2076 if entry['exists']:
2076 self._wrappedctx[path].clearunknown()
2077 self._wrappedctx[path].clearunknown()
2077 if entry['data'] is not None:
2078 if entry['data'] is not None:
2078 if entry['flags'] is None:
2079 if entry['flags'] is None:
2079 raise error.ProgrammingError('data set but not flags')
2080 raise error.ProgrammingError('data set but not flags')
2080 self._wrappedctx[path].write(
2081 self._wrappedctx[path].write(
2081 entry['data'],
2082 entry['data'],
2082 entry['flags'])
2083 entry['flags'])
2083 else:
2084 else:
2084 self._wrappedctx[path].setflags(
2085 self._wrappedctx[path].setflags(
2085 'l' in entry['flags'],
2086 'l' in entry['flags'],
2086 'x' in entry['flags'])
2087 'x' in entry['flags'])
2087 else:
2088 else:
2088 self._wrappedctx[path].remove(path)
2089 self._wrappedctx[path].remove(path)
2089 self._clean()
2090 self._clean()
2090
2091
2091 def isdirty(self, path):
2092 def isdirty(self, path):
2092 return path in self._cache
2093 return path in self._cache
2093
2094
2094 def _clean(self):
2095 def _clean(self):
2095 self._cache = {}
2096 self._cache = {}
2096 self._writeorder = []
2097 self._writeorder = []
2097
2098
2098 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2099 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2099 if path not in self._cache:
2100 if path not in self._cache:
2100 self._writeorder.append(path)
2101 self._writeorder.append(path)
2101
2102
2102 self._cache[path] = {
2103 self._cache[path] = {
2103 'exists': exists,
2104 'exists': exists,
2104 'data': data,
2105 'data': data,
2105 'date': date,
2106 'date': date,
2106 'flags': flags,
2107 'flags': flags,
2107 }
2108 }
2108
2109
2109 def filectx(self, path, filelog=None):
2110 def filectx(self, path, filelog=None):
2110 return overlayworkingfilectx(self._repo, path, parent=self,
2111 return overlayworkingfilectx(self._repo, path, parent=self,
2111 filelog=filelog)
2112 filelog=filelog)
2112
2113
2113 class overlayworkingfilectx(workingfilectx):
2114 class overlayworkingfilectx(workingfilectx):
2114 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2115 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2115 cache, which can be flushed through later by calling ``flush()``."""
2116 cache, which can be flushed through later by calling ``flush()``."""
2116
2117
2117 def __init__(self, repo, path, filelog=None, parent=None):
2118 def __init__(self, repo, path, filelog=None, parent=None):
2118 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2119 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2119 parent)
2120 parent)
2120 self._repo = repo
2121 self._repo = repo
2121 self._parent = parent
2122 self._parent = parent
2122 self._path = path
2123 self._path = path
2123
2124
2124 def ctx(self):
2125 def ctx(self):
2125 return self._parent
2126 return self._parent
2126
2127
2127 def data(self):
2128 def data(self):
2128 return self._parent.data(self._path)
2129 return self._parent.data(self._path)
2129
2130
2130 def date(self):
2131 def date(self):
2131 return self._parent.filedate(self._path)
2132 return self._parent.filedate(self._path)
2132
2133
2133 def exists(self):
2134 def exists(self):
2134 return self.lexists()
2135 return self.lexists()
2135
2136
2136 def lexists(self):
2137 def lexists(self):
2137 return self._parent.exists(self._path)
2138 return self._parent.exists(self._path)
2138
2139
2139 def renamed(self):
2140 def renamed(self):
2140 # Copies are currently tracked in the dirstate as before. Straight copy
2141 # Copies are currently tracked in the dirstate as before. Straight copy
2141 # from workingfilectx.
2142 # from workingfilectx.
2142 rp = self._repo.dirstate.copied(self._path)
2143 rp = self._repo.dirstate.copied(self._path)
2143 if not rp:
2144 if not rp:
2144 return None
2145 return None
2145 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
2146 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
2146
2147
2147 def size(self):
2148 def size(self):
2148 return self._parent.size(self._path)
2149 return self._parent.size(self._path)
2149
2150
2150 def audit(self):
2151 def audit(self):
2151 pass
2152 pass
2152
2153
2153 def flags(self):
2154 def flags(self):
2154 return self._parent.flags(self._path)
2155 return self._parent.flags(self._path)
2155
2156
2156 def setflags(self, islink, isexec):
2157 def setflags(self, islink, isexec):
2157 return self._parent.setflags(self._path, islink, isexec)
2158 return self._parent.setflags(self._path, islink, isexec)
2158
2159
2159 def write(self, data, flags, backgroundclose=False):
2160 def write(self, data, flags, backgroundclose=False):
2160 return self._parent.write(self._path, data, flags)
2161 return self._parent.write(self._path, data, flags)
2161
2162
2162 def remove(self, ignoremissing=False):
2163 def remove(self, ignoremissing=False):
2163 return self._parent.remove(self._path)
2164 return self._parent.remove(self._path)
2164
2165
2165 class workingcommitctx(workingctx):
2166 class workingcommitctx(workingctx):
2166 """A workingcommitctx object makes access to data related to
2167 """A workingcommitctx object makes access to data related to
2167 the revision being committed convenient.
2168 the revision being committed convenient.
2168
2169
2169 This hides changes in the working directory, if they aren't
2170 This hides changes in the working directory, if they aren't
2170 committed in this context.
2171 committed in this context.
2171 """
2172 """
2172 def __init__(self, repo, changes,
2173 def __init__(self, repo, changes,
2173 text="", user=None, date=None, extra=None):
2174 text="", user=None, date=None, extra=None):
2174 super(workingctx, self).__init__(repo, text, user, date, extra,
2175 super(workingctx, self).__init__(repo, text, user, date, extra,
2175 changes)
2176 changes)
2176
2177
2177 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2178 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2178 """Return matched files only in ``self._status``
2179 """Return matched files only in ``self._status``
2179
2180
2180 Uncommitted files appear "clean" via this context, even if
2181 Uncommitted files appear "clean" via this context, even if
2181 they aren't actually so in the working directory.
2182 they aren't actually so in the working directory.
2182 """
2183 """
2183 if clean:
2184 if clean:
2184 clean = [f for f in self._manifest if f not in self._changedset]
2185 clean = [f for f in self._manifest if f not in self._changedset]
2185 else:
2186 else:
2186 clean = []
2187 clean = []
2187 return scmutil.status([f for f in self._status.modified if match(f)],
2188 return scmutil.status([f for f in self._status.modified if match(f)],
2188 [f for f in self._status.added if match(f)],
2189 [f for f in self._status.added if match(f)],
2189 [f for f in self._status.removed if match(f)],
2190 [f for f in self._status.removed if match(f)],
2190 [], [], [], clean)
2191 [], [], [], clean)
2191
2192
2192 @propertycache
2193 @propertycache
2193 def _changedset(self):
2194 def _changedset(self):
2194 """Return the set of files changed in this context
2195 """Return the set of files changed in this context
2195 """
2196 """
2196 changed = set(self._status.modified)
2197 changed = set(self._status.modified)
2197 changed.update(self._status.added)
2198 changed.update(self._status.added)
2198 changed.update(self._status.removed)
2199 changed.update(self._status.removed)
2199 return changed
2200 return changed
2200
2201
2201 def makecachingfilectxfn(func):
2202 def makecachingfilectxfn(func):
2202 """Create a filectxfn that caches based on the path.
2203 """Create a filectxfn that caches based on the path.
2203
2204
2204 We can't use util.cachefunc because it uses all arguments as the cache
2205 We can't use util.cachefunc because it uses all arguments as the cache
2205 key and this creates a cycle since the arguments include the repo and
2206 key and this creates a cycle since the arguments include the repo and
2206 memctx.
2207 memctx.
2207 """
2208 """
2208 cache = {}
2209 cache = {}
2209
2210
2210 def getfilectx(repo, memctx, path):
2211 def getfilectx(repo, memctx, path):
2211 if path not in cache:
2212 if path not in cache:
2212 cache[path] = func(repo, memctx, path)
2213 cache[path] = func(repo, memctx, path)
2213 return cache[path]
2214 return cache[path]
2214
2215
2215 return getfilectx
2216 return getfilectx
2216
2217
2217 def memfilefromctx(ctx):
2218 def memfilefromctx(ctx):
2218 """Given a context return a memfilectx for ctx[path]
2219 """Given a context return a memfilectx for ctx[path]
2219
2220
2220 This is a convenience method for building a memctx based on another
2221 This is a convenience method for building a memctx based on another
2221 context.
2222 context.
2222 """
2223 """
2223 def getfilectx(repo, memctx, path):
2224 def getfilectx(repo, memctx, path):
2224 fctx = ctx[path]
2225 fctx = ctx[path]
2225 # this is weird but apparently we only keep track of one parent
2226 # this is weird but apparently we only keep track of one parent
2226 # (why not only store that instead of a tuple?)
2227 # (why not only store that instead of a tuple?)
2227 copied = fctx.renamed()
2228 copied = fctx.renamed()
2228 if copied:
2229 if copied:
2229 copied = copied[0]
2230 copied = copied[0]
2230 return memfilectx(repo, path, fctx.data(),
2231 return memfilectx(repo, path, fctx.data(),
2231 islink=fctx.islink(), isexec=fctx.isexec(),
2232 islink=fctx.islink(), isexec=fctx.isexec(),
2232 copied=copied, memctx=memctx)
2233 copied=copied, memctx=memctx)
2233
2234
2234 return getfilectx
2235 return getfilectx
2235
2236
2236 def memfilefrompatch(patchstore):
2237 def memfilefrompatch(patchstore):
2237 """Given a patch (e.g. patchstore object) return a memfilectx
2238 """Given a patch (e.g. patchstore object) return a memfilectx
2238
2239
2239 This is a convenience method for building a memctx based on a patchstore.
2240 This is a convenience method for building a memctx based on a patchstore.
2240 """
2241 """
2241 def getfilectx(repo, memctx, path):
2242 def getfilectx(repo, memctx, path):
2242 data, mode, copied = patchstore.getfile(path)
2243 data, mode, copied = patchstore.getfile(path)
2243 if data is None:
2244 if data is None:
2244 return None
2245 return None
2245 islink, isexec = mode
2246 islink, isexec = mode
2246 return memfilectx(repo, path, data, islink=islink,
2247 return memfilectx(repo, path, data, islink=islink,
2247 isexec=isexec, copied=copied,
2248 isexec=isexec, copied=copied,
2248 memctx=memctx)
2249 memctx=memctx)
2249
2250
2250 return getfilectx
2251 return getfilectx
2251
2252
2252 class memctx(committablectx):
2253 class memctx(committablectx):
2253 """Use memctx to perform in-memory commits via localrepo.commitctx().
2254 """Use memctx to perform in-memory commits via localrepo.commitctx().
2254
2255
2255 Revision information is supplied at initialization time while
2256 Revision information is supplied at initialization time while
2256 related files data and is made available through a callback
2257 related files data and is made available through a callback
2257 mechanism. 'repo' is the current localrepo, 'parents' is a
2258 mechanism. 'repo' is the current localrepo, 'parents' is a
2258 sequence of two parent revisions identifiers (pass None for every
2259 sequence of two parent revisions identifiers (pass None for every
2259 missing parent), 'text' is the commit message and 'files' lists
2260 missing parent), 'text' is the commit message and 'files' lists
2260 names of files touched by the revision (normalized and relative to
2261 names of files touched by the revision (normalized and relative to
2261 repository root).
2262 repository root).
2262
2263
2263 filectxfn(repo, memctx, path) is a callable receiving the
2264 filectxfn(repo, memctx, path) is a callable receiving the
2264 repository, the current memctx object and the normalized path of
2265 repository, the current memctx object and the normalized path of
2265 requested file, relative to repository root. It is fired by the
2266 requested file, relative to repository root. It is fired by the
2266 commit function for every file in 'files', but calls order is
2267 commit function for every file in 'files', but calls order is
2267 undefined. If the file is available in the revision being
2268 undefined. If the file is available in the revision being
2268 committed (updated or added), filectxfn returns a memfilectx
2269 committed (updated or added), filectxfn returns a memfilectx
2269 object. If the file was removed, filectxfn return None for recent
2270 object. If the file was removed, filectxfn return None for recent
2270 Mercurial. Moved files are represented by marking the source file
2271 Mercurial. Moved files are represented by marking the source file
2271 removed and the new file added with copy information (see
2272 removed and the new file added with copy information (see
2272 memfilectx).
2273 memfilectx).
2273
2274
2274 user receives the committer name and defaults to current
2275 user receives the committer name and defaults to current
2275 repository username, date is the commit date in any format
2276 repository username, date is the commit date in any format
2276 supported by util.parsedate() and defaults to current date, extra
2277 supported by util.parsedate() and defaults to current date, extra
2277 is a dictionary of metadata or is left empty.
2278 is a dictionary of metadata or is left empty.
2278 """
2279 """
2279
2280
2280 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2281 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2281 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2282 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2282 # this field to determine what to do in filectxfn.
2283 # this field to determine what to do in filectxfn.
2283 _returnnoneformissingfiles = True
2284 _returnnoneformissingfiles = True
2284
2285
2285 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2286 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2286 date=None, extra=None, branch=None, editor=False):
2287 date=None, extra=None, branch=None, editor=False):
2287 super(memctx, self).__init__(repo, text, user, date, extra)
2288 super(memctx, self).__init__(repo, text, user, date, extra)
2288 self._rev = None
2289 self._rev = None
2289 self._node = None
2290 self._node = None
2290 parents = [(p or nullid) for p in parents]
2291 parents = [(p or nullid) for p in parents]
2291 p1, p2 = parents
2292 p1, p2 = parents
2292 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2293 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2293 files = sorted(set(files))
2294 files = sorted(set(files))
2294 self._files = files
2295 self._files = files
2295 if branch is not None:
2296 if branch is not None:
2296 self._extra['branch'] = encoding.fromlocal(branch)
2297 self._extra['branch'] = encoding.fromlocal(branch)
2297 self.substate = {}
2298 self.substate = {}
2298
2299
2299 if isinstance(filectxfn, patch.filestore):
2300 if isinstance(filectxfn, patch.filestore):
2300 filectxfn = memfilefrompatch(filectxfn)
2301 filectxfn = memfilefrompatch(filectxfn)
2301 elif not callable(filectxfn):
2302 elif not callable(filectxfn):
2302 # if store is not callable, wrap it in a function
2303 # if store is not callable, wrap it in a function
2303 filectxfn = memfilefromctx(filectxfn)
2304 filectxfn = memfilefromctx(filectxfn)
2304
2305
2305 # memoizing increases performance for e.g. vcs convert scenarios.
2306 # memoizing increases performance for e.g. vcs convert scenarios.
2306 self._filectxfn = makecachingfilectxfn(filectxfn)
2307 self._filectxfn = makecachingfilectxfn(filectxfn)
2307
2308
2308 if editor:
2309 if editor:
2309 self._text = editor(self._repo, self, [])
2310 self._text = editor(self._repo, self, [])
2310 self._repo.savecommitmessage(self._text)
2311 self._repo.savecommitmessage(self._text)
2311
2312
2312 def filectx(self, path, filelog=None):
2313 def filectx(self, path, filelog=None):
2313 """get a file context from the working directory
2314 """get a file context from the working directory
2314
2315
2315 Returns None if file doesn't exist and should be removed."""
2316 Returns None if file doesn't exist and should be removed."""
2316 return self._filectxfn(self._repo, self, path)
2317 return self._filectxfn(self._repo, self, path)
2317
2318
2318 def commit(self):
2319 def commit(self):
2319 """commit context to the repo"""
2320 """commit context to the repo"""
2320 return self._repo.commitctx(self)
2321 return self._repo.commitctx(self)
2321
2322
2322 @propertycache
2323 @propertycache
2323 def _manifest(self):
2324 def _manifest(self):
2324 """generate a manifest based on the return values of filectxfn"""
2325 """generate a manifest based on the return values of filectxfn"""
2325
2326
2326 # keep this simple for now; just worry about p1
2327 # keep this simple for now; just worry about p1
2327 pctx = self._parents[0]
2328 pctx = self._parents[0]
2328 man = pctx.manifest().copy()
2329 man = pctx.manifest().copy()
2329
2330
2330 for f in self._status.modified:
2331 for f in self._status.modified:
2331 p1node = nullid
2332 p1node = nullid
2332 p2node = nullid
2333 p2node = nullid
2333 p = pctx[f].parents() # if file isn't in pctx, check p2?
2334 p = pctx[f].parents() # if file isn't in pctx, check p2?
2334 if len(p) > 0:
2335 if len(p) > 0:
2335 p1node = p[0].filenode()
2336 p1node = p[0].filenode()
2336 if len(p) > 1:
2337 if len(p) > 1:
2337 p2node = p[1].filenode()
2338 p2node = p[1].filenode()
2338 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2339 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2339
2340
2340 for f in self._status.added:
2341 for f in self._status.added:
2341 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2342 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2342
2343
2343 for f in self._status.removed:
2344 for f in self._status.removed:
2344 if f in man:
2345 if f in man:
2345 del man[f]
2346 del man[f]
2346
2347
2347 return man
2348 return man
2348
2349
2349 @propertycache
2350 @propertycache
2350 def _status(self):
2351 def _status(self):
2351 """Calculate exact status from ``files`` specified at construction
2352 """Calculate exact status from ``files`` specified at construction
2352 """
2353 """
2353 man1 = self.p1().manifest()
2354 man1 = self.p1().manifest()
2354 p2 = self._parents[1]
2355 p2 = self._parents[1]
2355 # "1 < len(self._parents)" can't be used for checking
2356 # "1 < len(self._parents)" can't be used for checking
2356 # existence of the 2nd parent, because "memctx._parents" is
2357 # existence of the 2nd parent, because "memctx._parents" is
2357 # explicitly initialized by the list, of which length is 2.
2358 # explicitly initialized by the list, of which length is 2.
2358 if p2.node() != nullid:
2359 if p2.node() != nullid:
2359 man2 = p2.manifest()
2360 man2 = p2.manifest()
2360 managing = lambda f: f in man1 or f in man2
2361 managing = lambda f: f in man1 or f in man2
2361 else:
2362 else:
2362 managing = lambda f: f in man1
2363 managing = lambda f: f in man1
2363
2364
2364 modified, added, removed = [], [], []
2365 modified, added, removed = [], [], []
2365 for f in self._files:
2366 for f in self._files:
2366 if not managing(f):
2367 if not managing(f):
2367 added.append(f)
2368 added.append(f)
2368 elif self[f]:
2369 elif self[f]:
2369 modified.append(f)
2370 modified.append(f)
2370 else:
2371 else:
2371 removed.append(f)
2372 removed.append(f)
2372
2373
2373 return scmutil.status(modified, added, removed, [], [], [], [])
2374 return scmutil.status(modified, added, removed, [], [], [], [])
2374
2375
2375 class memfilectx(committablefilectx):
2376 class memfilectx(committablefilectx):
2376 """memfilectx represents an in-memory file to commit.
2377 """memfilectx represents an in-memory file to commit.
2377
2378
2378 See memctx and committablefilectx for more details.
2379 See memctx and committablefilectx for more details.
2379 """
2380 """
2380 def __init__(self, repo, path, data, islink=False,
2381 def __init__(self, repo, path, data, islink=False,
2381 isexec=False, copied=None, memctx=None):
2382 isexec=False, copied=None, memctx=None):
2382 """
2383 """
2383 path is the normalized file path relative to repository root.
2384 path is the normalized file path relative to repository root.
2384 data is the file content as a string.
2385 data is the file content as a string.
2385 islink is True if the file is a symbolic link.
2386 islink is True if the file is a symbolic link.
2386 isexec is True if the file is executable.
2387 isexec is True if the file is executable.
2387 copied is the source file path if current file was copied in the
2388 copied is the source file path if current file was copied in the
2388 revision being committed, or None."""
2389 revision being committed, or None."""
2389 super(memfilectx, self).__init__(repo, path, None, memctx)
2390 super(memfilectx, self).__init__(repo, path, None, memctx)
2390 self._data = data
2391 self._data = data
2391 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2392 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2392 self._copied = None
2393 self._copied = None
2393 if copied:
2394 if copied:
2394 self._copied = (copied, nullid)
2395 self._copied = (copied, nullid)
2395
2396
2396 def data(self):
2397 def data(self):
2397 return self._data
2398 return self._data
2398
2399
2399 def remove(self, ignoremissing=False):
2400 def remove(self, ignoremissing=False):
2400 """wraps unlink for a repo's working directory"""
2401 """wraps unlink for a repo's working directory"""
2401 # need to figure out what to do here
2402 # need to figure out what to do here
2402 del self._changectx[self._path]
2403 del self._changectx[self._path]
2403
2404
2404 def write(self, data, flags):
2405 def write(self, data, flags):
2405 """wraps repo.wwrite"""
2406 """wraps repo.wwrite"""
2406 self._data = data
2407 self._data = data
2407
2408
2408 class overlayfilectx(committablefilectx):
2409 class overlayfilectx(committablefilectx):
2409 """Like memfilectx but take an original filectx and optional parameters to
2410 """Like memfilectx but take an original filectx and optional parameters to
2410 override parts of it. This is useful when fctx.data() is expensive (i.e.
2411 override parts of it. This is useful when fctx.data() is expensive (i.e.
2411 flag processor is expensive) and raw data, flags, and filenode could be
2412 flag processor is expensive) and raw data, flags, and filenode could be
2412 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2413 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2413 """
2414 """
2414
2415
2415 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2416 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2416 copied=None, ctx=None):
2417 copied=None, ctx=None):
2417 """originalfctx: filecontext to duplicate
2418 """originalfctx: filecontext to duplicate
2418
2419
2419 datafunc: None or a function to override data (file content). It is a
2420 datafunc: None or a function to override data (file content). It is a
2420 function to be lazy. path, flags, copied, ctx: None or overridden value
2421 function to be lazy. path, flags, copied, ctx: None or overridden value
2421
2422
2422 copied could be (path, rev), or False. copied could also be just path,
2423 copied could be (path, rev), or False. copied could also be just path,
2423 and will be converted to (path, nullid). This simplifies some callers.
2424 and will be converted to (path, nullid). This simplifies some callers.
2424 """
2425 """
2425
2426
2426 if path is None:
2427 if path is None:
2427 path = originalfctx.path()
2428 path = originalfctx.path()
2428 if ctx is None:
2429 if ctx is None:
2429 ctx = originalfctx.changectx()
2430 ctx = originalfctx.changectx()
2430 ctxmatch = lambda: True
2431 ctxmatch = lambda: True
2431 else:
2432 else:
2432 ctxmatch = lambda: ctx == originalfctx.changectx()
2433 ctxmatch = lambda: ctx == originalfctx.changectx()
2433
2434
2434 repo = originalfctx.repo()
2435 repo = originalfctx.repo()
2435 flog = originalfctx.filelog()
2436 flog = originalfctx.filelog()
2436 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2437 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2437
2438
2438 if copied is None:
2439 if copied is None:
2439 copied = originalfctx.renamed()
2440 copied = originalfctx.renamed()
2440 copiedmatch = lambda: True
2441 copiedmatch = lambda: True
2441 else:
2442 else:
2442 if copied and not isinstance(copied, tuple):
2443 if copied and not isinstance(copied, tuple):
2443 # repo._filecommit will recalculate copyrev so nullid is okay
2444 # repo._filecommit will recalculate copyrev so nullid is okay
2444 copied = (copied, nullid)
2445 copied = (copied, nullid)
2445 copiedmatch = lambda: copied == originalfctx.renamed()
2446 copiedmatch = lambda: copied == originalfctx.renamed()
2446
2447
2447 # When data, copied (could affect data), ctx (could affect filelog
2448 # When data, copied (could affect data), ctx (could affect filelog
2448 # parents) are not overridden, rawdata, rawflags, and filenode may be
2449 # parents) are not overridden, rawdata, rawflags, and filenode may be
2449 # reused (repo._filecommit should double check filelog parents).
2450 # reused (repo._filecommit should double check filelog parents).
2450 #
2451 #
2451 # path, flags are not hashed in filelog (but in manifestlog) so they do
2452 # path, flags are not hashed in filelog (but in manifestlog) so they do
2452 # not affect reusable here.
2453 # not affect reusable here.
2453 #
2454 #
2454 # If ctx or copied is overridden to a same value with originalfctx,
2455 # If ctx or copied is overridden to a same value with originalfctx,
2455 # still consider it's reusable. originalfctx.renamed() may be a bit
2456 # still consider it's reusable. originalfctx.renamed() may be a bit
2456 # expensive so it's not called unless necessary. Assuming datafunc is
2457 # expensive so it's not called unless necessary. Assuming datafunc is
2457 # always expensive, do not call it for this "reusable" test.
2458 # always expensive, do not call it for this "reusable" test.
2458 reusable = datafunc is None and ctxmatch() and copiedmatch()
2459 reusable = datafunc is None and ctxmatch() and copiedmatch()
2459
2460
2460 if datafunc is None:
2461 if datafunc is None:
2461 datafunc = originalfctx.data
2462 datafunc = originalfctx.data
2462 if flags is None:
2463 if flags is None:
2463 flags = originalfctx.flags()
2464 flags = originalfctx.flags()
2464
2465
2465 self._datafunc = datafunc
2466 self._datafunc = datafunc
2466 self._flags = flags
2467 self._flags = flags
2467 self._copied = copied
2468 self._copied = copied
2468
2469
2469 if reusable:
2470 if reusable:
2470 # copy extra fields from originalfctx
2471 # copy extra fields from originalfctx
2471 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2472 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2472 for attr in attrs:
2473 for attr in attrs:
2473 if util.safehasattr(originalfctx, attr):
2474 if util.safehasattr(originalfctx, attr):
2474 setattr(self, attr, getattr(originalfctx, attr))
2475 setattr(self, attr, getattr(originalfctx, attr))
2475
2476
2476 def data(self):
2477 def data(self):
2477 return self._datafunc()
2478 return self._datafunc()
2478
2479
2479 class metadataonlyctx(committablectx):
2480 class metadataonlyctx(committablectx):
2480 """Like memctx but it's reusing the manifest of different commit.
2481 """Like memctx but it's reusing the manifest of different commit.
2481 Intended to be used by lightweight operations that are creating
2482 Intended to be used by lightweight operations that are creating
2482 metadata-only changes.
2483 metadata-only changes.
2483
2484
2484 Revision information is supplied at initialization time. 'repo' is the
2485 Revision information is supplied at initialization time. 'repo' is the
2485 current localrepo, 'ctx' is original revision which manifest we're reuisng
2486 current localrepo, 'ctx' is original revision which manifest we're reuisng
2486 'parents' is a sequence of two parent revisions identifiers (pass None for
2487 'parents' is a sequence of two parent revisions identifiers (pass None for
2487 every missing parent), 'text' is the commit.
2488 every missing parent), 'text' is the commit.
2488
2489
2489 user receives the committer name and defaults to current repository
2490 user receives the committer name and defaults to current repository
2490 username, date is the commit date in any format supported by
2491 username, date is the commit date in any format supported by
2491 util.parsedate() and defaults to current date, extra is a dictionary of
2492 util.parsedate() and defaults to current date, extra is a dictionary of
2492 metadata or is left empty.
2493 metadata or is left empty.
2493 """
2494 """
2494 def __new__(cls, repo, originalctx, *args, **kwargs):
2495 def __new__(cls, repo, originalctx, *args, **kwargs):
2495 return super(metadataonlyctx, cls).__new__(cls, repo)
2496 return super(metadataonlyctx, cls).__new__(cls, repo)
2496
2497
2497 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2498 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2498 date=None, extra=None, editor=False):
2499 date=None, extra=None, editor=False):
2499 if text is None:
2500 if text is None:
2500 text = originalctx.description()
2501 text = originalctx.description()
2501 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2502 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2502 self._rev = None
2503 self._rev = None
2503 self._node = None
2504 self._node = None
2504 self._originalctx = originalctx
2505 self._originalctx = originalctx
2505 self._manifestnode = originalctx.manifestnode()
2506 self._manifestnode = originalctx.manifestnode()
2506 if parents is None:
2507 if parents is None:
2507 parents = originalctx.parents()
2508 parents = originalctx.parents()
2508 else:
2509 else:
2509 parents = [repo[p] for p in parents if p is not None]
2510 parents = [repo[p] for p in parents if p is not None]
2510 parents = parents[:]
2511 parents = parents[:]
2511 while len(parents) < 2:
2512 while len(parents) < 2:
2512 parents.append(repo[nullid])
2513 parents.append(repo[nullid])
2513 p1, p2 = self._parents = parents
2514 p1, p2 = self._parents = parents
2514
2515
2515 # sanity check to ensure that the reused manifest parents are
2516 # sanity check to ensure that the reused manifest parents are
2516 # manifests of our commit parents
2517 # manifests of our commit parents
2517 mp1, mp2 = self.manifestctx().parents
2518 mp1, mp2 = self.manifestctx().parents
2518 if p1 != nullid and p1.manifestnode() != mp1:
2519 if p1 != nullid and p1.manifestnode() != mp1:
2519 raise RuntimeError('can\'t reuse the manifest: '
2520 raise RuntimeError('can\'t reuse the manifest: '
2520 'its p1 doesn\'t match the new ctx p1')
2521 'its p1 doesn\'t match the new ctx p1')
2521 if p2 != nullid and p2.manifestnode() != mp2:
2522 if p2 != nullid and p2.manifestnode() != mp2:
2522 raise RuntimeError('can\'t reuse the manifest: '
2523 raise RuntimeError('can\'t reuse the manifest: '
2523 'its p2 doesn\'t match the new ctx p2')
2524 'its p2 doesn\'t match the new ctx p2')
2524
2525
2525 self._files = originalctx.files()
2526 self._files = originalctx.files()
2526 self.substate = {}
2527 self.substate = {}
2527
2528
2528 if editor:
2529 if editor:
2529 self._text = editor(self._repo, self, [])
2530 self._text = editor(self._repo, self, [])
2530 self._repo.savecommitmessage(self._text)
2531 self._repo.savecommitmessage(self._text)
2531
2532
2532 def manifestnode(self):
2533 def manifestnode(self):
2533 return self._manifestnode
2534 return self._manifestnode
2534
2535
2535 @property
2536 @property
2536 def _manifestctx(self):
2537 def _manifestctx(self):
2537 return self._repo.manifestlog[self._manifestnode]
2538 return self._repo.manifestlog[self._manifestnode]
2538
2539
2539 def filectx(self, path, filelog=None):
2540 def filectx(self, path, filelog=None):
2540 return self._originalctx.filectx(path, filelog=filelog)
2541 return self._originalctx.filectx(path, filelog=filelog)
2541
2542
2542 def commit(self):
2543 def commit(self):
2543 """commit context to the repo"""
2544 """commit context to the repo"""
2544 return self._repo.commitctx(self)
2545 return self._repo.commitctx(self)
2545
2546
2546 @property
2547 @property
2547 def _manifest(self):
2548 def _manifest(self):
2548 return self._originalctx.manifest()
2549 return self._originalctx.manifest()
2549
2550
2550 @propertycache
2551 @propertycache
2551 def _status(self):
2552 def _status(self):
2552 """Calculate exact status from ``files`` specified in the ``origctx``
2553 """Calculate exact status from ``files`` specified in the ``origctx``
2553 and parents manifests.
2554 and parents manifests.
2554 """
2555 """
2555 man1 = self.p1().manifest()
2556 man1 = self.p1().manifest()
2556 p2 = self._parents[1]
2557 p2 = self._parents[1]
2557 # "1 < len(self._parents)" can't be used for checking
2558 # "1 < len(self._parents)" can't be used for checking
2558 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2559 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2559 # explicitly initialized by the list, of which length is 2.
2560 # explicitly initialized by the list, of which length is 2.
2560 if p2.node() != nullid:
2561 if p2.node() != nullid:
2561 man2 = p2.manifest()
2562 man2 = p2.manifest()
2562 managing = lambda f: f in man1 or f in man2
2563 managing = lambda f: f in man1 or f in man2
2563 else:
2564 else:
2564 managing = lambda f: f in man1
2565 managing = lambda f: f in man1
2565
2566
2566 modified, added, removed = [], [], []
2567 modified, added, removed = [], [], []
2567 for f in self._files:
2568 for f in self._files:
2568 if not managing(f):
2569 if not managing(f):
2569 added.append(f)
2570 added.append(f)
2570 elif f in self:
2571 elif f in self:
2571 modified.append(f)
2572 modified.append(f)
2572 else:
2573 else:
2573 removed.append(f)
2574 removed.append(f)
2574
2575
2575 return scmutil.status(modified, added, removed, [], [], [], [])
2576 return scmutil.status(modified, added, removed, [], [], [], [])
2576
2577
2577 class arbitraryfilectx(object):
2578 class arbitraryfilectx(object):
2578 """Allows you to use filectx-like functions on a file in an arbitrary
2579 """Allows you to use filectx-like functions on a file in an arbitrary
2579 location on disk, possibly not in the working directory.
2580 location on disk, possibly not in the working directory.
2580 """
2581 """
2581 def __init__(self, path):
2582 def __init__(self, path):
2582 self._path = path
2583 self._path = path
2583
2584
2584 def cmp(self, otherfilectx):
2585 def cmp(self, otherfilectx):
2585 return self.data() != otherfilectx.data()
2586 return self.data() != otherfilectx.data()
2586
2587
2587 def path(self):
2588 def path(self):
2588 return self._path
2589 return self._path
2589
2590
2590 def flags(self):
2591 def flags(self):
2591 return ''
2592 return ''
2592
2593
2593 def data(self):
2594 def data(self):
2594 return util.readfile(self._path)
2595 return util.readfile(self._path)
2595
2596
2596 def decodeddata(self):
2597 def decodeddata(self):
2597 with open(self._path, "rb") as f:
2598 with open(self._path, "rb") as f:
2598 return f.read()
2599 return f.read()
2599
2600
2600 def remove(self):
2601 def remove(self):
2601 util.unlink(self._path)
2602 util.unlink(self._path)
2602
2603
2603 def write(self, data, flags):
2604 def write(self, data, flags):
2604 assert not flags
2605 assert not flags
2605 with open(self._path, "w") as f:
2606 with open(self._path, "w") as f:
2606 f.write(data)
2607 f.write(data)
@@ -1,1123 +1,1123 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirid,
23 wdirid,
24 wdirrev,
24 wdirrev,
25 )
25 )
26
26
27 from . import (
27 from . import (
28 encoding,
28 encoding,
29 error,
29 error,
30 match as matchmod,
30 match as matchmod,
31 obsolete,
31 obsolete,
32 obsutil,
32 obsutil,
33 pathutil,
33 pathutil,
34 phases,
34 phases,
35 pycompat,
35 pycompat,
36 revsetlang,
36 revsetlang,
37 similar,
37 similar,
38 util,
38 util,
39 )
39 )
40
40
41 if pycompat.osname == 'nt':
41 if pycompat.osname == 'nt':
42 from . import scmwindows as scmplatform
42 from . import scmwindows as scmplatform
43 else:
43 else:
44 from . import scmposix as scmplatform
44 from . import scmposix as scmplatform
45
45
46 termsize = scmplatform.termsize
46 termsize = scmplatform.termsize
47
47
48 class status(tuple):
48 class status(tuple):
49 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
49 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
50 and 'ignored' properties are only relevant to the working copy.
50 and 'ignored' properties are only relevant to the working copy.
51 '''
51 '''
52
52
53 __slots__ = ()
53 __slots__ = ()
54
54
55 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
55 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
56 clean):
56 clean):
57 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
57 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
58 ignored, clean))
58 ignored, clean))
59
59
60 @property
60 @property
61 def modified(self):
61 def modified(self):
62 '''files that have been modified'''
62 '''files that have been modified'''
63 return self[0]
63 return self[0]
64
64
65 @property
65 @property
66 def added(self):
66 def added(self):
67 '''files that have been added'''
67 '''files that have been added'''
68 return self[1]
68 return self[1]
69
69
70 @property
70 @property
71 def removed(self):
71 def removed(self):
72 '''files that have been removed'''
72 '''files that have been removed'''
73 return self[2]
73 return self[2]
74
74
75 @property
75 @property
76 def deleted(self):
76 def deleted(self):
77 '''files that are in the dirstate, but have been deleted from the
77 '''files that are in the dirstate, but have been deleted from the
78 working copy (aka "missing")
78 working copy (aka "missing")
79 '''
79 '''
80 return self[3]
80 return self[3]
81
81
82 @property
82 @property
83 def unknown(self):
83 def unknown(self):
84 '''files not in the dirstate that are not ignored'''
84 '''files not in the dirstate that are not ignored'''
85 return self[4]
85 return self[4]
86
86
87 @property
87 @property
88 def ignored(self):
88 def ignored(self):
89 '''files not in the dirstate that are ignored (by _dirignore())'''
89 '''files not in the dirstate that are ignored (by _dirignore())'''
90 return self[5]
90 return self[5]
91
91
92 @property
92 @property
93 def clean(self):
93 def clean(self):
94 '''files that have not been modified'''
94 '''files that have not been modified'''
95 return self[6]
95 return self[6]
96
96
97 def __repr__(self, *args, **kwargs):
97 def __repr__(self, *args, **kwargs):
98 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
98 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
99 'unknown=%r, ignored=%r, clean=%r>') % self)
99 'unknown=%r, ignored=%r, clean=%r>') % self)
100
100
101 def itersubrepos(ctx1, ctx2):
101 def itersubrepos(ctx1, ctx2):
102 """find subrepos in ctx1 or ctx2"""
102 """find subrepos in ctx1 or ctx2"""
103 # Create a (subpath, ctx) mapping where we prefer subpaths from
103 # Create a (subpath, ctx) mapping where we prefer subpaths from
104 # ctx1. The subpaths from ctx2 are important when the .hgsub file
104 # ctx1. The subpaths from ctx2 are important when the .hgsub file
105 # has been modified (in ctx2) but not yet committed (in ctx1).
105 # has been modified (in ctx2) but not yet committed (in ctx1).
106 subpaths = dict.fromkeys(ctx2.substate, ctx2)
106 subpaths = dict.fromkeys(ctx2.substate, ctx2)
107 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
107 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
108
108
109 missing = set()
109 missing = set()
110
110
111 for subpath in ctx2.substate:
111 for subpath in ctx2.substate:
112 if subpath not in ctx1.substate:
112 if subpath not in ctx1.substate:
113 del subpaths[subpath]
113 del subpaths[subpath]
114 missing.add(subpath)
114 missing.add(subpath)
115
115
116 for subpath, ctx in sorted(subpaths.iteritems()):
116 for subpath, ctx in sorted(subpaths.iteritems()):
117 yield subpath, ctx.sub(subpath)
117 yield subpath, ctx.sub(subpath)
118
118
119 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
119 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
120 # status and diff will have an accurate result when it does
120 # status and diff will have an accurate result when it does
121 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
121 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
122 # against itself.
122 # against itself.
123 for subpath in missing:
123 for subpath in missing:
124 yield subpath, ctx2.nullsub(subpath, ctx1)
124 yield subpath, ctx2.nullsub(subpath, ctx1)
125
125
126 def nochangesfound(ui, repo, excluded=None):
126 def nochangesfound(ui, repo, excluded=None):
127 '''Report no changes for push/pull, excluded is None or a list of
127 '''Report no changes for push/pull, excluded is None or a list of
128 nodes excluded from the push/pull.
128 nodes excluded from the push/pull.
129 '''
129 '''
130 secretlist = []
130 secretlist = []
131 if excluded:
131 if excluded:
132 for n in excluded:
132 for n in excluded:
133 ctx = repo[n]
133 ctx = repo[n]
134 if ctx.phase() >= phases.secret and not ctx.extinct():
134 if ctx.phase() >= phases.secret and not ctx.extinct():
135 secretlist.append(n)
135 secretlist.append(n)
136
136
137 if secretlist:
137 if secretlist:
138 ui.status(_("no changes found (ignored %d secret changesets)\n")
138 ui.status(_("no changes found (ignored %d secret changesets)\n")
139 % len(secretlist))
139 % len(secretlist))
140 else:
140 else:
141 ui.status(_("no changes found\n"))
141 ui.status(_("no changes found\n"))
142
142
143 def callcatch(ui, func):
143 def callcatch(ui, func):
144 """call func() with global exception handling
144 """call func() with global exception handling
145
145
146 return func() if no exception happens. otherwise do some error handling
146 return func() if no exception happens. otherwise do some error handling
147 and return an exit code accordingly. does not handle all exceptions.
147 and return an exit code accordingly. does not handle all exceptions.
148 """
148 """
149 try:
149 try:
150 try:
150 try:
151 return func()
151 return func()
152 except: # re-raises
152 except: # re-raises
153 ui.traceback()
153 ui.traceback()
154 raise
154 raise
155 # Global exception handling, alphabetically
155 # Global exception handling, alphabetically
156 # Mercurial-specific first, followed by built-in and library exceptions
156 # Mercurial-specific first, followed by built-in and library exceptions
157 except error.LockHeld as inst:
157 except error.LockHeld as inst:
158 if inst.errno == errno.ETIMEDOUT:
158 if inst.errno == errno.ETIMEDOUT:
159 reason = _('timed out waiting for lock held by %r') % inst.locker
159 reason = _('timed out waiting for lock held by %r') % inst.locker
160 else:
160 else:
161 reason = _('lock held by %r') % inst.locker
161 reason = _('lock held by %r') % inst.locker
162 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
162 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
163 if not inst.locker:
163 if not inst.locker:
164 ui.warn(_("(lock might be very busy)\n"))
164 ui.warn(_("(lock might be very busy)\n"))
165 except error.LockUnavailable as inst:
165 except error.LockUnavailable as inst:
166 ui.warn(_("abort: could not lock %s: %s\n") %
166 ui.warn(_("abort: could not lock %s: %s\n") %
167 (inst.desc or inst.filename,
167 (inst.desc or inst.filename,
168 encoding.strtolocal(inst.strerror)))
168 encoding.strtolocal(inst.strerror)))
169 except error.OutOfBandError as inst:
169 except error.OutOfBandError as inst:
170 if inst.args:
170 if inst.args:
171 msg = _("abort: remote error:\n")
171 msg = _("abort: remote error:\n")
172 else:
172 else:
173 msg = _("abort: remote error\n")
173 msg = _("abort: remote error\n")
174 ui.warn(msg)
174 ui.warn(msg)
175 if inst.args:
175 if inst.args:
176 ui.warn(''.join(inst.args))
176 ui.warn(''.join(inst.args))
177 if inst.hint:
177 if inst.hint:
178 ui.warn('(%s)\n' % inst.hint)
178 ui.warn('(%s)\n' % inst.hint)
179 except error.RepoError as inst:
179 except error.RepoError as inst:
180 ui.warn(_("abort: %s!\n") % inst)
180 ui.warn(_("abort: %s!\n") % inst)
181 if inst.hint:
181 if inst.hint:
182 ui.warn(_("(%s)\n") % inst.hint)
182 ui.warn(_("(%s)\n") % inst.hint)
183 except error.ResponseError as inst:
183 except error.ResponseError as inst:
184 ui.warn(_("abort: %s") % inst.args[0])
184 ui.warn(_("abort: %s") % inst.args[0])
185 if not isinstance(inst.args[1], basestring):
185 if not isinstance(inst.args[1], basestring):
186 ui.warn(" %r\n" % (inst.args[1],))
186 ui.warn(" %r\n" % (inst.args[1],))
187 elif not inst.args[1]:
187 elif not inst.args[1]:
188 ui.warn(_(" empty string\n"))
188 ui.warn(_(" empty string\n"))
189 else:
189 else:
190 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
190 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
191 except error.CensoredNodeError as inst:
191 except error.CensoredNodeError as inst:
192 ui.warn(_("abort: file censored %s!\n") % inst)
192 ui.warn(_("abort: file censored %s!\n") % inst)
193 except error.RevlogError as inst:
193 except error.RevlogError as inst:
194 ui.warn(_("abort: %s!\n") % inst)
194 ui.warn(_("abort: %s!\n") % inst)
195 except error.InterventionRequired as inst:
195 except error.InterventionRequired as inst:
196 ui.warn("%s\n" % inst)
196 ui.warn("%s\n" % inst)
197 if inst.hint:
197 if inst.hint:
198 ui.warn(_("(%s)\n") % inst.hint)
198 ui.warn(_("(%s)\n") % inst.hint)
199 return 1
199 return 1
200 except error.WdirUnsupported:
200 except error.WdirUnsupported:
201 ui.warn(_("abort: working directory revision cannot be specified\n"))
201 ui.warn(_("abort: working directory revision cannot be specified\n"))
202 except error.Abort as inst:
202 except error.Abort as inst:
203 ui.warn(_("abort: %s\n") % inst)
203 ui.warn(_("abort: %s\n") % inst)
204 if inst.hint:
204 if inst.hint:
205 ui.warn(_("(%s)\n") % inst.hint)
205 ui.warn(_("(%s)\n") % inst.hint)
206 except ImportError as inst:
206 except ImportError as inst:
207 ui.warn(_("abort: %s!\n") % inst)
207 ui.warn(_("abort: %s!\n") % inst)
208 m = str(inst).split()[-1]
208 m = str(inst).split()[-1]
209 if m in "mpatch bdiff".split():
209 if m in "mpatch bdiff".split():
210 ui.warn(_("(did you forget to compile extensions?)\n"))
210 ui.warn(_("(did you forget to compile extensions?)\n"))
211 elif m in "zlib".split():
211 elif m in "zlib".split():
212 ui.warn(_("(is your Python install correct?)\n"))
212 ui.warn(_("(is your Python install correct?)\n"))
213 except IOError as inst:
213 except IOError as inst:
214 if util.safehasattr(inst, "code"):
214 if util.safehasattr(inst, "code"):
215 ui.warn(_("abort: %s\n") % inst)
215 ui.warn(_("abort: %s\n") % inst)
216 elif util.safehasattr(inst, "reason"):
216 elif util.safehasattr(inst, "reason"):
217 try: # usually it is in the form (errno, strerror)
217 try: # usually it is in the form (errno, strerror)
218 reason = inst.reason.args[1]
218 reason = inst.reason.args[1]
219 except (AttributeError, IndexError):
219 except (AttributeError, IndexError):
220 # it might be anything, for example a string
220 # it might be anything, for example a string
221 reason = inst.reason
221 reason = inst.reason
222 if isinstance(reason, unicode):
222 if isinstance(reason, unicode):
223 # SSLError of Python 2.7.9 contains a unicode
223 # SSLError of Python 2.7.9 contains a unicode
224 reason = encoding.unitolocal(reason)
224 reason = encoding.unitolocal(reason)
225 ui.warn(_("abort: error: %s\n") % reason)
225 ui.warn(_("abort: error: %s\n") % reason)
226 elif (util.safehasattr(inst, "args")
226 elif (util.safehasattr(inst, "args")
227 and inst.args and inst.args[0] == errno.EPIPE):
227 and inst.args and inst.args[0] == errno.EPIPE):
228 pass
228 pass
229 elif getattr(inst, "strerror", None):
229 elif getattr(inst, "strerror", None):
230 if getattr(inst, "filename", None):
230 if getattr(inst, "filename", None):
231 ui.warn(_("abort: %s: %s\n") % (
231 ui.warn(_("abort: %s: %s\n") % (
232 encoding.strtolocal(inst.strerror), inst.filename))
232 encoding.strtolocal(inst.strerror), inst.filename))
233 else:
233 else:
234 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
234 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
235 else:
235 else:
236 raise
236 raise
237 except OSError as inst:
237 except OSError as inst:
238 if getattr(inst, "filename", None) is not None:
238 if getattr(inst, "filename", None) is not None:
239 ui.warn(_("abort: %s: '%s'\n") % (
239 ui.warn(_("abort: %s: '%s'\n") % (
240 encoding.strtolocal(inst.strerror), inst.filename))
240 encoding.strtolocal(inst.strerror), inst.filename))
241 else:
241 else:
242 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
242 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
243 except MemoryError:
243 except MemoryError:
244 ui.warn(_("abort: out of memory\n"))
244 ui.warn(_("abort: out of memory\n"))
245 except SystemExit as inst:
245 except SystemExit as inst:
246 # Commands shouldn't sys.exit directly, but give a return code.
246 # Commands shouldn't sys.exit directly, but give a return code.
247 # Just in case catch this and and pass exit code to caller.
247 # Just in case catch this and and pass exit code to caller.
248 return inst.code
248 return inst.code
249 except socket.error as inst:
249 except socket.error as inst:
250 ui.warn(_("abort: %s\n") % inst.args[-1])
250 ui.warn(_("abort: %s\n") % inst.args[-1])
251
251
252 return -1
252 return -1
253
253
def checknewlabel(repo, lbl, kind):
    """Abort if ``lbl`` is unusable as a new label (bookmark/branch/etc).

    Note: the "kind" parameter is deliberately not interpolated into ui
    output, because that would make the strings difficult to translate.
    """
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(_("%r cannot be used in a name") % forbidden)
    try:
        int(lbl)
    except ValueError:
        return
    # the whole label parsed as an integer: it would shadow a rev number
    raise error.Abort(_("cannot use an integer as a name"))
267
267
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in ('\n', '\r')):
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
272
272
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, util.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
284
284
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows always aborts on non-portable names; elsewhere only if asked.
    abort = lval == 'abort' or pycompat.osname == 'nt'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
297
297
class casecollisionauditor(object):
    """Warn or abort when a filename case-folds onto an already-seen name.

    Calling the instance with a filename records it and complains (via
    error.Abort when ``abort`` is set, ui.warn otherwise) if its lowercased
    form collides with a tracked or previously audited file.
    """
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # Lowercase the whole dirstate key set in one pass for speed.
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # _newfiles lets the same filename be audited twice without a
        # spurious collision complaint.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
321
321
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    digest = hashlib.sha1()
    for rev in revs:
        digest.update('%d;' % rev)
    return digest.digest()
345
345
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # Only propagate walk errors for the root path itself; failures
        # deeper in the tree are silently skipped by os.walk.
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Record dirname's stat in dirlst.  Returns True when the
            # directory was not seen before (used to break symlink cycles).
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without os.path.samestat we cannot detect symlink cycles, so
        # following symlinks is unsafe and gets disabled
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        # sort for deterministic yield order across filesystems
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target in a nested call, sharing
                        # seen_dirs so cycles through links terminate
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            # replace in place so os.walk only descends unseen dirs
            dirs[:] = newdirs
393
393
def binnode(ctx):
    """Return binary node id for a given basectx"""
    n = ctx.node()
    # the working directory context has no node; use the sentinel id
    return wdirid if n is None else n
400
400
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    r = ctx.rev()
    # working directory contexts have rev None; map to the sentinel rev
    return wdirrev if r is None else r
408
408
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by cmdutil.changeset_templater"""
    return formatrevnode(ctx.repo().ui, intrev(ctx), binnode(ctx))
414
414
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # full hex hash in debug mode, short form otherwise
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
422
422
def revsingle(repo, revspec, default='.', localalias=None):
    """Resolve ``revspec`` to a single changectx, or ``repo[default]`` when
    the spec is empty (0 is a valid, non-empty spec)."""
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
431
431
def _pairspec(revspec):
    # report whether revspec parses to a top-level range expression
    tree = revsetlang.parse(revspec)
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
435
435
def revpair(repo, revs):
    """Resolve ``revs`` to a pair of binary nodes ``(first, second)``.

    ``second`` is None for a single (non-range) revision; with no revs the
    working directory's first parent is paired with None.
    """
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)

    if not resolved:
        first = second = None
    elif resolved.isascending():
        first, second = resolved.min(), resolved.max()
    elif resolved.isdescending():
        first, second = resolved.max(), resolved.min()
    else:
        first, second = resolved.first(), resolved.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
465
465
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are promoted to 'rev(N)' revset expressions
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
493
493
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    # hide the parent when it is simply the immediately preceding revision
    return [] if parents[0].rev() >= intrev(ctx) - 1 else parents
509
509
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly kinded patterns are passed through untouched
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        # keep the original pattern when it matched nothing
        expanded.extend(matches if matches else [kindpat])
    return expanded
528
528
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    # a single empty-string pattern means "no patterns"
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # NOTE: 'm' is the matcher created below; this closure relies on
        # late binding and must not fire before ctx.match() has returned.
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    # an always-matcher came from no effective patterns; report none used
    if m.always():
        pats = []
    return m, pats
553
553
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
558
558
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
562
562
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
566
566
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if origbackuppath is None:
        return filepath + ".orig"

    # mirror the file's repo-relative path under the backup directory
    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, relpath)

    backupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(backupdir):
        ui.note(_('creating directory: %s\n') % backupdir)
        util.makedirs(backupdir)

    return fullorigpath
586
586
587 class _containsnode(object):
587 class _containsnode(object):
588 """proxy __contains__(node) to container.__contains__ which accepts revs"""
588 """proxy __contains__(node) to container.__contains__ which accepts revs"""
589
589
590 def __init__(self, repo, revcontainer):
590 def __init__(self, repo, revcontainer):
591 self._torev = repo.changelog.rev
591 self._torev = repo.changelog.rev
592 self._revcontains = revcontainer.__contains__
592 self._revcontains = revcontainer.__contains__
593
593
594 def __contains__(self, node):
594 def __contains__(self, node):
595 return self._revcontains(self._torev(node))
595 return self._revcontains(self._torev(node))
596
596
def cleanupnodes(repo, mapping, operation):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    mapping is {oldnode: [newnode]} or a iterable of nodes if they do not have
    replacements. operation is a string, like "rebase".
    """
    # normalize a bare iterable of nodes into a mapping with no successors
    if not util.safehasattr(mapping, 'items'):
        mapping = {n: () for n in mapping}

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in mapping.values() for n in ns]
        for oldnode, newnodes in mapping.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(repo.set('max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                roots = list(repo.set('max((::%n) - %ln)', oldnode,
                                      list(mapping)))
                if roots:
                    newnode = roots[0].node()
                else:
                    # no surviving ancestor: park the bookmark on null
                    newnode = nullid
            else:
                newnode = newnodes[0]
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (oldbmarks, hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    # None means "delete this divergent bookmark"
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            # Unfiltered repo is needed since nodes in mapping might be hidden.
            unfi = repo.unfiltered()
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(mapping.items(), key=sortfunc)
                    if s or not isobs(n)]
            obsolete.createmarkers(repo, rels, operation=operation)
        else:
            from . import repair # avoid import cycle
            repair.delayedstrip(repo.ui, repo, list(mapping), operation)
666
666
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    '''Schedule unknown files for addition and missing files for removal,
    recursing into subrepos, with rename detection when similarity > 0.

    Returns 1 if any explicitly listed file was rejected or a subrepo
    reported failure, 0 otherwise.  dry_run suppresses dirstate changes.
    '''
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    # process subrepos first, so their status lines appear before ours
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only warn for files the user named explicitly; failed pattern
        # matches are collected silently
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    # report what will be added/removed (always for pattern matches,
    # only in verbose mode for exact matches)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # any rejected file that was explicitly requested makes the whole
    # operation fail
    for f in rejected:
        if f in m.files():
            return 1
    return ret
722
722
723 def marktouched(repo, files, similarity=0.0):
723 def marktouched(repo, files, similarity=0.0):
724 '''Assert that files have somehow been operated upon. files are relative to
724 '''Assert that files have somehow been operated upon. files are relative to
725 the repo root.'''
725 the repo root.'''
726 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
726 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
727 rejected = []
727 rejected = []
728
728
729 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
729 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
730
730
731 if repo.ui.verbose:
731 if repo.ui.verbose:
732 unknownset = set(unknown + forgotten)
732 unknownset = set(unknown + forgotten)
733 toprint = unknownset.copy()
733 toprint = unknownset.copy()
734 toprint.update(deleted)
734 toprint.update(deleted)
735 for abs in sorted(toprint):
735 for abs in sorted(toprint):
736 if abs in unknownset:
736 if abs in unknownset:
737 status = _('adding %s\n') % abs
737 status = _('adding %s\n') % abs
738 else:
738 else:
739 status = _('removing %s\n') % abs
739 status = _('removing %s\n') % abs
740 repo.ui.status(status)
740 repo.ui.status(status)
741
741
742 renames = _findrenames(repo, m, added + unknown, removed + deleted,
742 renames = _findrenames(repo, m, added + unknown, removed + deleted,
743 similarity)
743 similarity)
744
744
745 _markchanges(repo, unknown + forgotten, deleted, renames)
745 _markchanges(repo, unknown + forgotten, deleted, renames)
746
746
747 for f in rejected:
747 for f in rejected:
748 if f in m.files():
748 if f in m.files():
749 return 1
749 return 1
750 return 0
750 return 0
751
751
752 def _interestingfiles(repo, matcher):
752 def _interestingfiles(repo, matcher):
753 '''Walk dirstate with matcher, looking for files that addremove would care
753 '''Walk dirstate with matcher, looking for files that addremove would care
754 about.
754 about.
755
755
756 This is different from dirstate.status because it doesn't care about
756 This is different from dirstate.status because it doesn't care about
757 whether files are modified or clean.'''
757 whether files are modified or clean.'''
758 added, unknown, deleted, removed, forgotten = [], [], [], [], []
758 added, unknown, deleted, removed, forgotten = [], [], [], [], []
759 audit_path = pathutil.pathauditor(repo.root, cached=True)
759 audit_path = pathutil.pathauditor(repo.root, cached=True)
760
760
761 ctx = repo[None]
761 ctx = repo[None]
762 dirstate = repo.dirstate
762 dirstate = repo.dirstate
763 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
763 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
764 full=False)
764 unknown=True, ignored=False, full=False)
765 for abs, st in walkresults.iteritems():
765 for abs, st in walkresults.iteritems():
766 dstate = dirstate[abs]
766 dstate = dirstate[abs]
767 if dstate == '?' and audit_path.check(abs):
767 if dstate == '?' and audit_path.check(abs):
768 unknown.append(abs)
768 unknown.append(abs)
769 elif dstate != 'r' and not st:
769 elif dstate != 'r' and not st:
770 deleted.append(abs)
770 deleted.append(abs)
771 elif dstate == 'r' and st:
771 elif dstate == 'r' and st:
772 forgotten.append(abs)
772 forgotten.append(abs)
773 # for finding renames
773 # for finding renames
774 elif dstate == 'r' and not st:
774 elif dstate == 'r' and not st:
775 removed.append(abs)
775 removed.append(abs)
776 elif dstate == 'a':
776 elif dstate == 'a':
777 added.append(abs)
777 added.append(abs)
778
778
779 return added, unknown, deleted, removed, forgotten
779 return added, unknown, deleted, removed, forgotten
780
780
781 def _findrenames(repo, matcher, added, removed, similarity):
781 def _findrenames(repo, matcher, added, removed, similarity):
782 '''Find renames from removed files to added ones.'''
782 '''Find renames from removed files to added ones.'''
783 renames = {}
783 renames = {}
784 if similarity > 0:
784 if similarity > 0:
785 for old, new, score in similar.findrenames(repo, added, removed,
785 for old, new, score in similar.findrenames(repo, added, removed,
786 similarity):
786 similarity):
787 if (repo.ui.verbose or not matcher.exact(old)
787 if (repo.ui.verbose or not matcher.exact(old)
788 or not matcher.exact(new)):
788 or not matcher.exact(new)):
789 repo.ui.status(_('recording removal of %s as rename to %s '
789 repo.ui.status(_('recording removal of %s as rename to %s '
790 '(%d%% similar)\n') %
790 '(%d%% similar)\n') %
791 (matcher.rel(old), matcher.rel(new),
791 (matcher.rel(old), matcher.rel(new),
792 score * 100))
792 score * 100))
793 renames[new] = old
793 renames[new] = old
794 return renames
794 return renames
795
795
796 def _markchanges(repo, unknown, deleted, renames):
796 def _markchanges(repo, unknown, deleted, renames):
797 '''Marks the files in unknown as added, the files in deleted as removed,
797 '''Marks the files in unknown as added, the files in deleted as removed,
798 and the files in renames as copied.'''
798 and the files in renames as copied.'''
799 wctx = repo[None]
799 wctx = repo[None]
800 with repo.wlock():
800 with repo.wlock():
801 wctx.forget(deleted)
801 wctx.forget(deleted)
802 wctx.add(unknown)
802 wctx.add(unknown)
803 for new, old in renames.iteritems():
803 for new, old in renames.iteritems():
804 wctx.copy(old, new)
804 wctx.copy(old, new)
805
805
806 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
806 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
807 """Update the dirstate to reflect the intent of copying src to dst. For
807 """Update the dirstate to reflect the intent of copying src to dst. For
808 different reasons it might not end with dst being marked as copied from src.
808 different reasons it might not end with dst being marked as copied from src.
809 """
809 """
810 origsrc = repo.dirstate.copied(src) or src
810 origsrc = repo.dirstate.copied(src) or src
811 if dst == origsrc: # copying back a copy?
811 if dst == origsrc: # copying back a copy?
812 if repo.dirstate[dst] not in 'mn' and not dryrun:
812 if repo.dirstate[dst] not in 'mn' and not dryrun:
813 repo.dirstate.normallookup(dst)
813 repo.dirstate.normallookup(dst)
814 else:
814 else:
815 if repo.dirstate[origsrc] == 'a' and origsrc == src:
815 if repo.dirstate[origsrc] == 'a' and origsrc == src:
816 if not ui.quiet:
816 if not ui.quiet:
817 ui.warn(_("%s has not been committed yet, so no copy "
817 ui.warn(_("%s has not been committed yet, so no copy "
818 "data will be stored for %s.\n")
818 "data will be stored for %s.\n")
819 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
819 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
820 if repo.dirstate[dst] in '?r' and not dryrun:
820 if repo.dirstate[dst] in '?r' and not dryrun:
821 wctx.add([dst])
821 wctx.add([dst])
822 elif not dryrun:
822 elif not dryrun:
823 wctx.copy(origsrc, dst)
823 wctx.copy(origsrc, dst)
824
824
825 def readrequires(opener, supported):
825 def readrequires(opener, supported):
826 '''Reads and parses .hg/requires and checks if all entries found
826 '''Reads and parses .hg/requires and checks if all entries found
827 are in the list of supported features.'''
827 are in the list of supported features.'''
828 requirements = set(opener.read("requires").splitlines())
828 requirements = set(opener.read("requires").splitlines())
829 missings = []
829 missings = []
830 for r in requirements:
830 for r in requirements:
831 if r not in supported:
831 if r not in supported:
832 if not r or not r[0].isalnum():
832 if not r or not r[0].isalnum():
833 raise error.RequirementError(_(".hg/requires file is corrupt"))
833 raise error.RequirementError(_(".hg/requires file is corrupt"))
834 missings.append(r)
834 missings.append(r)
835 missings.sort()
835 missings.sort()
836 if missings:
836 if missings:
837 raise error.RequirementError(
837 raise error.RequirementError(
838 _("repository requires features unknown to this Mercurial: %s")
838 _("repository requires features unknown to this Mercurial: %s")
839 % " ".join(missings),
839 % " ".join(missings),
840 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
840 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
841 " for more information"))
841 " for more information"))
842 return requirements
842 return requirements
843
843
844 def writerequires(opener, requirements):
844 def writerequires(opener, requirements):
845 with opener('requires', 'w') as fp:
845 with opener('requires', 'w') as fp:
846 for r in sorted(requirements):
846 for r in sorted(requirements):
847 fp.write("%s\n" % r)
847 fp.write("%s\n" % r)
848
848
849 class filecachesubentry(object):
849 class filecachesubentry(object):
850 def __init__(self, path, stat):
850 def __init__(self, path, stat):
851 self.path = path
851 self.path = path
852 self.cachestat = None
852 self.cachestat = None
853 self._cacheable = None
853 self._cacheable = None
854
854
855 if stat:
855 if stat:
856 self.cachestat = filecachesubentry.stat(self.path)
856 self.cachestat = filecachesubentry.stat(self.path)
857
857
858 if self.cachestat:
858 if self.cachestat:
859 self._cacheable = self.cachestat.cacheable()
859 self._cacheable = self.cachestat.cacheable()
860 else:
860 else:
861 # None means we don't know yet
861 # None means we don't know yet
862 self._cacheable = None
862 self._cacheable = None
863
863
864 def refresh(self):
864 def refresh(self):
865 if self.cacheable():
865 if self.cacheable():
866 self.cachestat = filecachesubentry.stat(self.path)
866 self.cachestat = filecachesubentry.stat(self.path)
867
867
868 def cacheable(self):
868 def cacheable(self):
869 if self._cacheable is not None:
869 if self._cacheable is not None:
870 return self._cacheable
870 return self._cacheable
871
871
872 # we don't know yet, assume it is for now
872 # we don't know yet, assume it is for now
873 return True
873 return True
874
874
875 def changed(self):
875 def changed(self):
876 # no point in going further if we can't cache it
876 # no point in going further if we can't cache it
877 if not self.cacheable():
877 if not self.cacheable():
878 return True
878 return True
879
879
880 newstat = filecachesubentry.stat(self.path)
880 newstat = filecachesubentry.stat(self.path)
881
881
882 # we may not know if it's cacheable yet, check again now
882 # we may not know if it's cacheable yet, check again now
883 if newstat and self._cacheable is None:
883 if newstat and self._cacheable is None:
884 self._cacheable = newstat.cacheable()
884 self._cacheable = newstat.cacheable()
885
885
886 # check again
886 # check again
887 if not self._cacheable:
887 if not self._cacheable:
888 return True
888 return True
889
889
890 if self.cachestat != newstat:
890 if self.cachestat != newstat:
891 self.cachestat = newstat
891 self.cachestat = newstat
892 return True
892 return True
893 else:
893 else:
894 return False
894 return False
895
895
896 @staticmethod
896 @staticmethod
897 def stat(path):
897 def stat(path):
898 try:
898 try:
899 return util.cachestat(path)
899 return util.cachestat(path)
900 except OSError as e:
900 except OSError as e:
901 if e.errno != errno.ENOENT:
901 if e.errno != errno.ENOENT:
902 raise
902 raise
903
903
904 class filecacheentry(object):
904 class filecacheentry(object):
905 def __init__(self, paths, stat=True):
905 def __init__(self, paths, stat=True):
906 self._entries = []
906 self._entries = []
907 for path in paths:
907 for path in paths:
908 self._entries.append(filecachesubentry(path, stat))
908 self._entries.append(filecachesubentry(path, stat))
909
909
910 def changed(self):
910 def changed(self):
911 '''true if any entry has changed'''
911 '''true if any entry has changed'''
912 for entry in self._entries:
912 for entry in self._entries:
913 if entry.changed():
913 if entry.changed():
914 return True
914 return True
915 return False
915 return False
916
916
917 def refresh(self):
917 def refresh(self):
918 for entry in self._entries:
918 for entry in self._entries:
919 entry.refresh()
919 entry.refresh()
920
920
921 class filecache(object):
921 class filecache(object):
922 '''A property like decorator that tracks files under .hg/ for updates.
922 '''A property like decorator that tracks files under .hg/ for updates.
923
923
924 Records stat info when called in _filecache.
924 Records stat info when called in _filecache.
925
925
926 On subsequent calls, compares old stat info with new info, and recreates the
926 On subsequent calls, compares old stat info with new info, and recreates the
927 object when any of the files changes, updating the new stat info in
927 object when any of the files changes, updating the new stat info in
928 _filecache.
928 _filecache.
929
929
930 Mercurial either atomic renames or appends for files under .hg,
930 Mercurial either atomic renames or appends for files under .hg,
931 so to ensure the cache is reliable we need the filesystem to be able
931 so to ensure the cache is reliable we need the filesystem to be able
932 to tell us if a file has been replaced. If it can't, we fallback to
932 to tell us if a file has been replaced. If it can't, we fallback to
933 recreating the object on every call (essentially the same behavior as
933 recreating the object on every call (essentially the same behavior as
934 propertycache).
934 propertycache).
935
935
936 '''
936 '''
937 def __init__(self, *paths):
937 def __init__(self, *paths):
938 self.paths = paths
938 self.paths = paths
939
939
940 def join(self, obj, fname):
940 def join(self, obj, fname):
941 """Used to compute the runtime path of a cached file.
941 """Used to compute the runtime path of a cached file.
942
942
943 Users should subclass filecache and provide their own version of this
943 Users should subclass filecache and provide their own version of this
944 function to call the appropriate join function on 'obj' (an instance
944 function to call the appropriate join function on 'obj' (an instance
945 of the class that its member function was decorated).
945 of the class that its member function was decorated).
946 """
946 """
947 raise NotImplementedError
947 raise NotImplementedError
948
948
949 def __call__(self, func):
949 def __call__(self, func):
950 self.func = func
950 self.func = func
951 self.name = func.__name__.encode('ascii')
951 self.name = func.__name__.encode('ascii')
952 return self
952 return self
953
953
954 def __get__(self, obj, type=None):
954 def __get__(self, obj, type=None):
955 # if accessed on the class, return the descriptor itself.
955 # if accessed on the class, return the descriptor itself.
956 if obj is None:
956 if obj is None:
957 return self
957 return self
958 # do we need to check if the file changed?
958 # do we need to check if the file changed?
959 if self.name in obj.__dict__:
959 if self.name in obj.__dict__:
960 assert self.name in obj._filecache, self.name
960 assert self.name in obj._filecache, self.name
961 return obj.__dict__[self.name]
961 return obj.__dict__[self.name]
962
962
963 entry = obj._filecache.get(self.name)
963 entry = obj._filecache.get(self.name)
964
964
965 if entry:
965 if entry:
966 if entry.changed():
966 if entry.changed():
967 entry.obj = self.func(obj)
967 entry.obj = self.func(obj)
968 else:
968 else:
969 paths = [self.join(obj, path) for path in self.paths]
969 paths = [self.join(obj, path) for path in self.paths]
970
970
971 # We stat -before- creating the object so our cache doesn't lie if
971 # We stat -before- creating the object so our cache doesn't lie if
972 # a writer modified between the time we read and stat
972 # a writer modified between the time we read and stat
973 entry = filecacheentry(paths, True)
973 entry = filecacheentry(paths, True)
974 entry.obj = self.func(obj)
974 entry.obj = self.func(obj)
975
975
976 obj._filecache[self.name] = entry
976 obj._filecache[self.name] = entry
977
977
978 obj.__dict__[self.name] = entry.obj
978 obj.__dict__[self.name] = entry.obj
979 return entry.obj
979 return entry.obj
980
980
981 def __set__(self, obj, value):
981 def __set__(self, obj, value):
982 if self.name not in obj._filecache:
982 if self.name not in obj._filecache:
983 # we add an entry for the missing value because X in __dict__
983 # we add an entry for the missing value because X in __dict__
984 # implies X in _filecache
984 # implies X in _filecache
985 paths = [self.join(obj, path) for path in self.paths]
985 paths = [self.join(obj, path) for path in self.paths]
986 ce = filecacheentry(paths, False)
986 ce = filecacheentry(paths, False)
987 obj._filecache[self.name] = ce
987 obj._filecache[self.name] = ce
988 else:
988 else:
989 ce = obj._filecache[self.name]
989 ce = obj._filecache[self.name]
990
990
991 ce.obj = value # update cached copy
991 ce.obj = value # update cached copy
992 obj.__dict__[self.name] = value # update copy returned by obj.x
992 obj.__dict__[self.name] = value # update copy returned by obj.x
993
993
994 def __delete__(self, obj):
994 def __delete__(self, obj):
995 try:
995 try:
996 del obj.__dict__[self.name]
996 del obj.__dict__[self.name]
997 except KeyError:
997 except KeyError:
998 raise AttributeError(self.name)
998 raise AttributeError(self.name)
999
999
1000 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1000 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1001 if lock is None:
1001 if lock is None:
1002 raise error.LockInheritanceContractViolation(
1002 raise error.LockInheritanceContractViolation(
1003 'lock can only be inherited while held')
1003 'lock can only be inherited while held')
1004 if environ is None:
1004 if environ is None:
1005 environ = {}
1005 environ = {}
1006 with lock.inherit() as locker:
1006 with lock.inherit() as locker:
1007 environ[envvar] = locker
1007 environ[envvar] = locker
1008 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1008 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1009
1009
1010 def wlocksub(repo, cmd, *args, **kwargs):
1010 def wlocksub(repo, cmd, *args, **kwargs):
1011 """run cmd as a subprocess that allows inheriting repo's wlock
1011 """run cmd as a subprocess that allows inheriting repo's wlock
1012
1012
1013 This can only be called while the wlock is held. This takes all the
1013 This can only be called while the wlock is held. This takes all the
1014 arguments that ui.system does, and returns the exit code of the
1014 arguments that ui.system does, and returns the exit code of the
1015 subprocess."""
1015 subprocess."""
1016 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1016 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1017 **kwargs)
1017 **kwargs)
1018
1018
1019 def gdinitconfig(ui):
1019 def gdinitconfig(ui):
1020 """helper function to know if a repo should be created as general delta
1020 """helper function to know if a repo should be created as general delta
1021 """
1021 """
1022 # experimental config: format.generaldelta
1022 # experimental config: format.generaldelta
1023 return (ui.configbool('format', 'generaldelta')
1023 return (ui.configbool('format', 'generaldelta')
1024 or ui.configbool('format', 'usegeneraldelta'))
1024 or ui.configbool('format', 'usegeneraldelta'))
1025
1025
1026 def gddeltaconfig(ui):
1026 def gddeltaconfig(ui):
1027 """helper function to know if incoming delta should be optimised
1027 """helper function to know if incoming delta should be optimised
1028 """
1028 """
1029 # experimental config: format.generaldelta
1029 # experimental config: format.generaldelta
1030 return ui.configbool('format', 'generaldelta')
1030 return ui.configbool('format', 'generaldelta')
1031
1031
1032 class simplekeyvaluefile(object):
1032 class simplekeyvaluefile(object):
1033 """A simple file with key=value lines
1033 """A simple file with key=value lines
1034
1034
1035 Keys must be alphanumerics and start with a letter, values must not
1035 Keys must be alphanumerics and start with a letter, values must not
1036 contain '\n' characters"""
1036 contain '\n' characters"""
1037 firstlinekey = '__firstline'
1037 firstlinekey = '__firstline'
1038
1038
1039 def __init__(self, vfs, path, keys=None):
1039 def __init__(self, vfs, path, keys=None):
1040 self.vfs = vfs
1040 self.vfs = vfs
1041 self.path = path
1041 self.path = path
1042
1042
1043 def read(self, firstlinenonkeyval=False):
1043 def read(self, firstlinenonkeyval=False):
1044 """Read the contents of a simple key-value file
1044 """Read the contents of a simple key-value file
1045
1045
1046 'firstlinenonkeyval' indicates whether the first line of file should
1046 'firstlinenonkeyval' indicates whether the first line of file should
1047 be treated as a key-value pair or reuturned fully under the
1047 be treated as a key-value pair or reuturned fully under the
1048 __firstline key."""
1048 __firstline key."""
1049 lines = self.vfs.readlines(self.path)
1049 lines = self.vfs.readlines(self.path)
1050 d = {}
1050 d = {}
1051 if firstlinenonkeyval:
1051 if firstlinenonkeyval:
1052 if not lines:
1052 if not lines:
1053 e = _("empty simplekeyvalue file")
1053 e = _("empty simplekeyvalue file")
1054 raise error.CorruptedState(e)
1054 raise error.CorruptedState(e)
1055 # we don't want to include '\n' in the __firstline
1055 # we don't want to include '\n' in the __firstline
1056 d[self.firstlinekey] = lines[0][:-1]
1056 d[self.firstlinekey] = lines[0][:-1]
1057 del lines[0]
1057 del lines[0]
1058
1058
1059 try:
1059 try:
1060 # the 'if line.strip()' part prevents us from failing on empty
1060 # the 'if line.strip()' part prevents us from failing on empty
1061 # lines which only contain '\n' therefore are not skipped
1061 # lines which only contain '\n' therefore are not skipped
1062 # by 'if line'
1062 # by 'if line'
1063 updatedict = dict(line[:-1].split('=', 1) for line in lines
1063 updatedict = dict(line[:-1].split('=', 1) for line in lines
1064 if line.strip())
1064 if line.strip())
1065 if self.firstlinekey in updatedict:
1065 if self.firstlinekey in updatedict:
1066 e = _("%r can't be used as a key")
1066 e = _("%r can't be used as a key")
1067 raise error.CorruptedState(e % self.firstlinekey)
1067 raise error.CorruptedState(e % self.firstlinekey)
1068 d.update(updatedict)
1068 d.update(updatedict)
1069 except ValueError as e:
1069 except ValueError as e:
1070 raise error.CorruptedState(str(e))
1070 raise error.CorruptedState(str(e))
1071 return d
1071 return d
1072
1072
1073 def write(self, data, firstline=None):
1073 def write(self, data, firstline=None):
1074 """Write key=>value mapping to a file
1074 """Write key=>value mapping to a file
1075 data is a dict. Keys must be alphanumerical and start with a letter.
1075 data is a dict. Keys must be alphanumerical and start with a letter.
1076 Values must not contain newline characters.
1076 Values must not contain newline characters.
1077
1077
1078 If 'firstline' is not None, it is written to file before
1078 If 'firstline' is not None, it is written to file before
1079 everything else, as it is, not in a key=value form"""
1079 everything else, as it is, not in a key=value form"""
1080 lines = []
1080 lines = []
1081 if firstline is not None:
1081 if firstline is not None:
1082 lines.append('%s\n' % firstline)
1082 lines.append('%s\n' % firstline)
1083
1083
1084 for k, v in data.items():
1084 for k, v in data.items():
1085 if k == self.firstlinekey:
1085 if k == self.firstlinekey:
1086 e = "key name '%s' is reserved" % self.firstlinekey
1086 e = "key name '%s' is reserved" % self.firstlinekey
1087 raise error.ProgrammingError(e)
1087 raise error.ProgrammingError(e)
1088 if not k[0].isalpha():
1088 if not k[0].isalpha():
1089 e = "keys must start with a letter in a key-value file"
1089 e = "keys must start with a letter in a key-value file"
1090 raise error.ProgrammingError(e)
1090 raise error.ProgrammingError(e)
1091 if not k.isalnum():
1091 if not k.isalnum():
1092 e = "invalid key name in a simple key-value file"
1092 e = "invalid key name in a simple key-value file"
1093 raise error.ProgrammingError(e)
1093 raise error.ProgrammingError(e)
1094 if '\n' in v:
1094 if '\n' in v:
1095 e = "invalid value in a simple key-value file"
1095 e = "invalid value in a simple key-value file"
1096 raise error.ProgrammingError(e)
1096 raise error.ProgrammingError(e)
1097 lines.append("%s=%s\n" % (k, v))
1097 lines.append("%s=%s\n" % (k, v))
1098 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1098 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1099 fp.write(''.join(lines))
1099 fp.write(''.join(lines))
1100
1100
1101 _reportobsoletedsource = [
1101 _reportobsoletedsource = [
1102 'debugobsolete',
1102 'debugobsolete',
1103 'pull',
1103 'pull',
1104 'push',
1104 'push',
1105 'serve',
1105 'serve',
1106 'unbundle',
1106 'unbundle',
1107 ]
1107 ]
1108
1108
1109 def registersummarycallback(repo, otr, txnname=''):
1109 def registersummarycallback(repo, otr, txnname=''):
1110 """register a callback to issue a summary after the transaction is closed
1110 """register a callback to issue a summary after the transaction is closed
1111 """
1111 """
1112 for source in _reportobsoletedsource:
1112 for source in _reportobsoletedsource:
1113 if txnname.startswith(source):
1113 if txnname.startswith(source):
1114 reporef = weakref.ref(repo)
1114 reporef = weakref.ref(repo)
1115 def reportsummary(tr):
1115 def reportsummary(tr):
1116 """the actual callback reporting the summary"""
1116 """the actual callback reporting the summary"""
1117 repo = reporef()
1117 repo = reporef()
1118 obsoleted = obsutil.getobsoleted(repo, tr)
1118 obsoleted = obsutil.getobsoleted(repo, tr)
1119 if obsoleted:
1119 if obsoleted:
1120 repo.ui.status(_('obsoleted %i changesets\n')
1120 repo.ui.status(_('obsoleted %i changesets\n')
1121 % len(obsoleted))
1121 % len(obsoleted))
1122 otr.addpostclose('00-txnreport', reportsummary)
1122 otr.addpostclose('00-txnreport', reportsummary)
1123 break
1123 break
General Comments 0
You need to be logged in to leave comments. Login now